2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Relative cost assigned to a branch by the inlining heuristics. */
#define BRANCH_COST 10
/* Maximum IL size, in bytes, of a method eligible for inlining. */
#define INLINE_LENGTH_LIMIT 20

/* These have 'cfg' as an implicit argument */
/*
 * Record an inline failure and abort IR generation of the current method,
 * but only while compiling an inlined body (cfg->method != cfg->current_method)
 * of a non-wrapper method.
 * NOTE(review): the closing "} } while (0)" lines appear to have been lost in
 * extraction (source line numbers jump); code tokens left untouched.
 */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
/* Bail out if an exception has already been recorded on 'cfg'. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE) \
			goto exception_exit; \
/* Record a method-access violation (METHOD calling CMETHOD) and abort. */
#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
		method_access_failure ((cfg), (method), (cmethod)); \
		goto exception_exit; \
/* Record a field-access violation (METHOD touching FIELD) and abort. */
#define FIELD_ACCESS_FAILURE(method, field) do { \
		field_access_failure ((cfg), (method), (field)); \
		goto exception_exit; \
/* Record that generic sharing cannot handle OPCODE here and abort. */
#define GENERIC_SHARING_FAILURE(opcode) do { \
		gshared_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
/* Same as above, but only when compiling in gsharedvt mode. */
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
/* Flag an out-of-memory condition on 'cfg' and abort. */
#define OUT_OF_MEMORY_FAILURE do { \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
	goto exception_exit; \
/* Disable AOT compilation for this method, logging the decision point. */
#define DISABLE_AOT(cfg) do { \
	if ((cfg)->verbose_level >= 2) \
		printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
	(cfg)->disable_aot = TRUE; \
/* Flag a type-load error and abort; break_on_unverified() aids debugging. */
#define LOAD_ERROR do { \
	break_on_unverified (); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
	goto exception_exit; \
/* Like LOAD_ERROR, but records the offending KLASS on the cfg. */
#define TYPE_LOAD_ERROR(klass) do { \
	cfg->exception_ptr = klass; \
/* Convert a pending MonoError on 'cfg' into a compile exception and abort. */
#define CHECK_CFG_ERROR do {\
	if (!mono_error_ok (&cfg->error)) { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
		goto mono_error_exit; \

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Map ldind.*/stind.* CIL opcodes to the matching *_MEMBASE IR opcodes. */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);

static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
						  guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);

/* helper methods signatures */
/* Lazily created in mono_create_helper_signatures (); shared across compiles. */
static MonoMethodSignature *helper_sig_class_init_trampoline;
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
/*
 * Instruction metadata
 */
/* Expand each entry of mini-ops.h into its dreg/sreg spec characters. */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
/* keep in sync with the enum in mini.h */
#include "mini-ops.h"
/* Re-expand mini-ops.h, this time producing a source-register count per opcode. */
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 ins_sreg_counts[] = {
#include "mini-ops.h"
/* Initialize the liveness bookkeeping of variable VI (id ID). */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the per-cfg virtual-register allocators.
 * NOTE(review): return-type lines and braces appear to have been dropped by
 * extraction (source line numbers jump); code tokens left untouched.
 */
/* Allocate a fresh integer vreg. */
mono_alloc_ireg (MonoCompile *cfg)
	return alloc_ireg (cfg);
/* Allocate a fresh 64-bit (long) vreg. */
mono_alloc_lreg (MonoCompile *cfg)
	return alloc_lreg (cfg);
/* Allocate a fresh floating-point vreg. */
mono_alloc_freg (MonoCompile *cfg)
	return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
mono_alloc_preg (MonoCompile *cfg)
	return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for STACK_TYPE. */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
	return alloc_dreg (cfg, stack_type);
/*
 * mono_alloc_ireg_ref:
 *
 *   Allocate an IREG, and mark it as holding a GC ref.
 */
mono_alloc_ireg_ref (MonoCompile *cfg)
	return alloc_ireg_ref (cfg);
/*
 * mono_alloc_ireg_mp:
 *
 *   Allocate an IREG, and mark it as holding a managed pointer.
 */
mono_alloc_ireg_mp (MonoCompile *cfg)
	return alloc_ireg_mp (cfg);
/*
 * mono_alloc_ireg_copy:
 *
 *   Allocate an IREG with the same GC type as VREG.
 */
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
	if (vreg_is_ref (cfg, vreg))
		return alloc_ireg_ref (cfg);
	else if (vreg_is_mp (cfg, vreg))
		return alloc_ireg_mp (cfg);
	return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode (OP_MOVE/OP_FMOVE/...) used to copy a value of
 * TYPE between registers.
 * NOTE(review): many case labels and return statements are missing from this
 * excerpt (source line numbers jump); code tokens left untouched.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
	/* Strip gsharedvt / enum indirections first. */
	type = mini_get_underlying_type (cfg, type);
	switch (type->type) {
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
		/* r4fp: R4 values are kept in dedicated single-precision regs. */
		return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		/* Enums move like their underlying integral type. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		/* Type variables are only legal here under generic sharing. */
		g_assert (cfg->generic_sharing_context);
		if (mini_type_var_is_vt (cfg, type))
		return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and instructions to stdout. */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	/* Print each instruction; -1 suppresses the index column. */
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
/*
 * Build the shared icall signatures used by the trampoline helpers.
 * Signature strings are "<ret> <arg1> <arg2> ..." as parsed by
 * mono_create_icall_signature ().
 */
mono_create_helper_signatures (void)
	helper_sig_domain_get = mono_create_icall_signature ("ptr");
	helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
	helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
	helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
	helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
	helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
	helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * Debugging hook: trap into the debugger when unverifiable IL is seen and
 * the 'break-on-unverified' debug option is enabled.
 */
static MONO_NEVER_INLINE void
break_on_unverified (void)
	if (mini_get_debug_options ()->break_on_unverified)
/*
 * Record a MethodAccessException on CFG: CIL_METHOD is not accessible
 * from METHOD. The formatted message is owned by the cfg.
 */
static MONO_NEVER_INLINE void
method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
	char *method_fname = mono_method_full_name (method, TRUE);
	char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
	cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
	/* The full names are copies; release them after formatting. */
	g_free (method_fname);
	g_free (cil_method_fname);
/*
 * Record a FieldAccessException on CFG: FIELD is not accessible from METHOD.
 * Mirrors method_access_failure () above.
 */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
	cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	g_free (method_fname);
	g_free (field_fname);
/*
 * Record that inlining failed for the reason MSG; the caller (INLINE_FAILURE)
 * then aborts IR generation of the inlined body.
 */
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
406 static MONO_NEVER_INLINE void
407 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
409 if (cfg->verbose_level > 2) \
410 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
411 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * Record that gsharedvt compilation cannot handle OPCODE; FILE/LINE point at
 * the call site (supplied by the GSHAREDVT_FAILURE macro).
 */
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
	cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
	if (cfg->verbose_level >= 2)
		printf ("%s\n", cfg->exception_message);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
/*
 * Abort on unverifiable IL: under gsharedvt fall back to per-instantiation
 * compilation, otherwise trap via break_on_unverified () for debugging.
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	break_on_unverified (); \

/* Fetch (or lazily create+register) the bblock starting at IL offset IP. */
#define GET_BBLOCK(cfg,tblock,ip) do {	\
	(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
		NEW_BBLOCK (cfg, (tblock)); \
		(tblock)->cil_code = (ip); \
		ADD_BBLOCK (cfg, (tblock)); \

#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm, as a managed pointer. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* Emit conversions so both operands of a binary opcode are of the same type */
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
	MonoInst *arg1 = *arg1_ref;
	MonoInst *arg2 = *arg2_ref;
	/* R4 mixed with R8: widen the R4 side to R8. */
	((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
	 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
	/* Mixing r4/r8 is allowed by the spec */
	if (arg1->type == STACK_R4) {
		int dreg = alloc_freg (cfg);
		EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
		conv->type = STACK_R8;
	if (arg2->type == STACK_R4) {
		int dreg = alloc_freg (cfg);
		EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
		conv->type = STACK_R8;
#if SIZEOF_REGISTER == 8
	/* FIXME: Need to add many more cases */
	/* On 64-bit, sign-extend an I4 operand mixed with a native pointer. */
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
		int dr = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
		(ins)->sreg2 = widen->dreg;
/*
 * Pop two values, emit the type-specialized binary opcode OP and push the
 * result. type_from_op () validates the operand types and specializes OP.
 */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (cfg, ins, sp [0], sp [1]);	\
		/* Have to insert a widening op */		 \
		add_widen_op (cfg, ins, &sp [0], &sp [1]);		 \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins), &bblock);	\

/* Pop one value, emit the type-specialized unary opcode OP, push the result. */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (cfg, ins, sp [0], NULL);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins, &bblock); \

/*
 * Emit a compare + conditional branch pair; wires the true edge to the
 * branch target and the false edge to NEXT_BLOCK (or the following IL).
 */
#define ADD_BINCOND(next_block) do {	\
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cfg, cmp, sp [0], sp [1]);	\
		add_widen_op (cfg, cmp, &sp [0], &sp [1]);						\
		type_from_op (cfg, ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		/* Flush the eval stack into shared interface vars before branching. */ \
		if (sp != stack_start) {									\
		    handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
/*
 * link_bblock: Links two basic blocks
 *
 * links two basic blocks in the control flow graph, the 'from'
 * argument is the starting block and the 'to' argument is the block
 * the control flow ends to after 'from'.
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	MonoBasicBlock **newa;
	/* Debug tracing of the edge being added. */
	if (from->cil_code) {
		/* NOTE(review): "IL%04x" here lacks the underscore used by the
		 * other edge messages ("IL_%04x") — likely a typo; left as-is. */
		printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
		printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		printf ("edge from entry to exit\n");
	/* Skip if the out-edge already exists. */
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
	/* Grow from->out_bb by one (mempool arrays are append-by-copy). */
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
	for (i = 0; i < from->out_count; ++i) {
		newa [i] = from->out_bb [i];
	/* Same dance for the symmetric in-edge on 'to'. */
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
	for (i = 0; i < to->in_count; ++i) {
		newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 *   We mark each basic block with a region ID. We use that to avoid BB
 *   optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 *   about the clause owner for this block.
 *
 *   The region encodes the try/catch/filter clause that owns this block
 *   as well as the type. -1 is a special value that represents a block
 *   that is in none of try/catch/filter.
 */
mono_find_block_region (MonoCompile *cfg, int offset)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* A filter region runs from filter_offset up to the handler start. */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
		/* Otherwise the offset may sit in the clause's protected (try) range. */
		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
/*
 * Collect the clauses of kind TYPE whose protected range contains IP but
 * not TARGET — i.e. the handlers a branch from IP to TARGET leaves.
 * Returns a GList of MonoExceptionClause* (caller frees the list).
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type)
				res = g_list_append (res, clause);
/*
 * Get or lazily create the stack-pointer save variable for REGION
 * (used by finally/filter handlers); cached in cfg->spvars.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;
	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for the handler at OFFSET, if any. */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for the handler at OFFSET. */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;
	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 * Sets inst->type (STACK_*) and inst->klass.
 * NOTE(review): several case labels/breaks are missing from this excerpt
 * (source line numbers jump); code tokens left untouched.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
	type = mini_get_underlying_type (cfg, type);
	inst->klass = klass = mono_class_from_mono_type (type);
	/* byref values are managed pointers on the eval stack. */
	inst->type = STACK_MP;
	switch (type->type) {
		inst->type = STACK_INV;
		inst->type = STACK_I4;
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		inst->type = STACK_I8;
		/* R4 stack type depends on whether the backend keeps r4 distinct. */
		inst->type = cfg->r4_stack_type;
		inst->type = STACK_R8;
	case MONO_TYPE_VALUETYPE:
		/* Enums behave like their underlying integral type. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		inst->type = STACK_VTYPE;
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		/* Type variables require generic sharing; vt vars are gsharedvt-only. */
		g_assert (cfg->generic_sharing_context);
		if (mini_is_gsharedvt_type (cfg, type)) {
			g_assert (cfg->gsharedvt);
			inst->type = STACK_VTYPE;
			type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
		g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 * Rows/columns are indexed by STACK_* values; order: Inv, i4, i8, ptr, r8,
 * mp, obj, vtype (some rows carry a 9th r4 column).
 */
/* Result type of numeric binops (add/sub/mul/div) per operand-type pair. */
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8,  STACK_INV, STACK_INV, STACK_INV, STACK_R8},
	{STACK_INV, STACK_MP,  STACK_INV, STACK_MP,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8,  STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand type. */
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
/* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/rem). */
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero encodes allowed compare kind. */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt r4 */
	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
	{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
	{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
/* reduce the size of this table */
/* Result type of shift ops: left operand's type, shift amount is i4/native. */
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_I4,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8,  STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode. Each entry is the delta added to the CIL opcode.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 *
 * NOTE(review): many case labels and break statements are missing from this
 * excerpt (source line numbers jump); code tokens left untouched.
 */
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
	switch (ins->opcode) {
	/* Numeric binops: validate via bin_num_table, then specialize. */
	/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
	/* Integer-only binops (and/or/xor/...). */
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
	/* Shift operations. */
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
	/* Compares: pick L/R/F/I variant from the first operand's type. */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R4)
			ins->opcode = OP_RCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
			ins->opcode = OP_ICOMPARE;
	case OP_ICOMPARE_IMM:
		/* Immediate compare: the second operand has src1's stack type. */
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
	/* Conditional branches (beq..blt.un). */
		ins->opcode += beqops_op_map [src1->type];
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
	/* ceq/cgt/clt family: bit 0 of the table marks equality-only pairs. */
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
	/* neg */
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
	/* not: only valid on integer-ish stack types. */
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
	/* conv to small ints -> i4 on the stack. */
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
	/* conv.r.un */
		ins->type = STACK_R8;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_R_UN;
			ins->opcode = OP_LCONV_TO_R_UN;
	/* Overflow-checked conversions to i4-sized results. */
	case CEE_CONV_OVF_I1:
	case CEE_CONV_OVF_U1:
	case CEE_CONV_OVF_I2:
	case CEE_CONV_OVF_U2:
	case CEE_CONV_OVF_I4:
	case CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_I_UN:
	case CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
	case CEE_CONV_OVF_I1_UN:
	case CEE_CONV_OVF_I2_UN:
	case CEE_CONV_OVF_I4_UN:
	case CEE_CONV_OVF_U1_UN:
	case CEE_CONV_OVF_U2_UN:
	case CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
	/* conv.u: result is native-pointer sized. */
		ins->type = STACK_PTR;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
			/* 32-bit: pointer-sized value is already the right width. */
			ins->opcode = OP_MOVE;
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_FCONV_TO_U;
	/* conv to i8/u8. */
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
	case CEE_CONV_OVF_I8:
	case CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_U8_UN:
	case CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
	/* conv.r4 */
		ins->type = cfg->r4_stack_type;
		ins->opcode += unops_op_map [src1->type];
	/* conv.r8 */
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		ins->type = STACK_R8;
	/* conv to native int sizes. */
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
	case CEE_CONV_OVF_I:
	case CEE_CONV_OVF_U:
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
	/* Overflow-checked arithmetic; not defined for floats. */
	case CEE_ADD_OVF_UN:
	case CEE_MUL_OVF_UN:
	case CEE_SUB_OVF_UN:
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
	/* Memory loads: stack type follows the loaded width. */
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
	case OP_LOADR4_MEMBASE:
		ins->type = cfg->r4_stack_type;
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
	/* Managed pointers carry object_class as a conservative klass. */
	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
	/* Stack type produced by each ldind.* variant (i1..u4 -> I4, etc.). */
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Parameter-compatibility matrix (currently unused; see commented check below). */
param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Validate that the argument instructions ARGS match SIG (and the optional
 * THIS type). Returns 0 on mismatch.
 * NOTE(review): several lines are missing from this excerpt (source line
 * numbers jump); code tokens left untouched.
 */
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	switch (args->type) {
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
			/* Byref params must receive managed pointers and vice versa. */
			if (!sig->params [i]->byref)
			if (sig->params [i]->byref)
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
			if (sig->params [i]->byref)
			/* Float args must map to R4/R8 parameters. */
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
	/*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;

/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling.
 */
mono_get_got_var (MonoCompile *cfg)
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->got_var;
/*
 * Lazily create the variable holding the runtime generic context / vtable;
 * only meaningful under generic sharing.
 */
mono_get_vtable_var (MonoCompile *cfg)
	g_assert (cfg->generic_sharing_context);
	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
	return cfg->rgctx_var;
/* Map an eval-stack type (ins->type) back to a MonoType*. */
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R4: return &mono_defaults.single_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
		/* Managed pointer: byref of the instruction's klass. */
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
		g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType* to its evaluation-stack type (STACK_*).
 * Enum and generic-instance types are resolved to their underlying form first. */
1282 static G_GNUC_UNUSED int
1283 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1285 t = mono_type_get_underlying_type (t);
1297 case MONO_TYPE_FNPTR:
1299 case MONO_TYPE_CLASS:
1300 case MONO_TYPE_STRING:
1301 case MONO_TYPE_OBJECT:
1302 case MONO_TYPE_SZARRAY:
1303 case MONO_TYPE_ARRAY:
/* R4 may be widened to STACK_R8 depending on the config; ask cfg. */
1309 return cfg->r4_stack_type;
1312 case MONO_TYPE_VALUETYPE:
1313 case MONO_TYPE_TYPEDBYREF:
1315 case MONO_TYPE_GENERICINST:
1316 if (mono_type_generic_inst_is_valuetype (t))
1322 g_assert_not_reached ();
/* Return the element class implied by a CEE_LDELEM_*/CEE_STELEM_* opcode. */
1329 array_access_to_klass (int opcode)
1333 return mono_defaults.byte_class;
1335 return mono_defaults.uint16_class;
/* Native-int sized element. */
1338 return mono_defaults.int_class;
1341 return mono_defaults.sbyte_class;
1344 return mono_defaults.int16_class;
1347 return mono_defaults.int32_class;
1349 return mono_defaults.uint32_class;
1352 return mono_defaults.int64_class;
1355 return mono_defaults.single_class;
1358 return mono_defaults.double_class;
1359 case CEE_LDELEM_REF:
1360 case CEE_STELEM_REF:
1361 return mono_defaults.object_class;
/* Non-array-access opcodes must never reach here. */
1363 g_assert_not_reached ();
1369  * We try to share variables when possible
/* Get (or create) the temp variable used for stack slot 'slot' holding a value
 * of ins's stack type. Intvar slots are cached in cfg->intvars for reuse. */
1372 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1377 /* inlining can result in deeper stacks */
1378 if (slot >= cfg->header->max_stack)
1379 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* One cache entry per (stack type, slot) pair. */
1381 pos = ins->type - 1 + slot * STACK_MAX;
1383 switch (ins->type) {
1390 if ((vnum = cfg->intvars [pos]))
1391 return cfg->varinfo [vnum];
1392 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1393 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' so the AOT compiler can later resolve
 * the runtime object from metadata alone. */
1402 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1405 * Don't use this if a generic_context is set, since that means AOT can't
1406 * look up the method using just the image+token.
1407 * table == 0 means this is a reference made from a wrapper.
1409 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1410 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1411 jump_info_token->image = image;
1412 jump_info_token->token = token;
1413 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1418  * This function is called to handle items that are left on the evaluation stack
1419  * at basic block boundaries. What happens is that we save the values to local variables
1420  * and we reload them later when first entering the target basic block (with the
1421  * handle_loaded_temps () function).
1422  * A single join point will use the same variables (stored in the array bb->out_stack or
1423  * bb->in_stack, if the basic block is before or after the join point).
1425  * This function needs to be called _before_ emitting the last instruction of
1426  * the bb (i.e. before emitting a branch).
1427  * If the stack merge fails at a join point, cfg->unverifiable is set.
1430 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1433 MonoBasicBlock *bb = cfg->cbb;
1434 MonoBasicBlock *outb;
1435 MonoInst *inst, **locals;
1440 if (cfg->verbose_level > 3)
1441 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First visit: decide which variables will carry the stack values out of bb. */
1442 if (!bb->out_scount) {
1443 bb->out_scount = count;
1444 //printf ("bblock %d has out:", bb->block_num);
/* Prefer to reuse the in_stack of a successor, if one already exists. */
1446 for (i = 0; i < bb->out_count; ++i) {
1447 outb = bb->out_bb [i];
1448 /* exception handlers are linked, but they should not be considered for stack args */
1449 if (outb->flags & BB_EXCEPTION_HANDLER)
1451 //printf (" %d", outb->block_num);
1452 if (outb->in_stack) {
1454 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate a fresh out_stack. */
1460 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1461 for (i = 0; i < count; ++i) {
1463 * try to reuse temps already allocated for this purpose, if they occupy the same
1464 * stack slot and if they are of the same type.
1465 * This won't cause conflicts since if 'local' is used to
1466 * store one of the values in the in_stack of a bblock, then
1467 * the same variable will be used for the same outgoing stack
1469 * This doesn't work when inlining methods, since the bblocks
1470 * in the inlined methods do not inherit their in_stack from
1471 * the bblock they are inlined to. See bug #58863 for an
1474 if (cfg->inlined_method)
1475 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1477 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables into each successor's in_stack. */
1482 for (i = 0; i < bb->out_count; ++i) {
1483 outb = bb->out_bb [i];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER)
1487 if (outb->in_scount) {
/* Stack depth mismatch at a join point: mark the method unverifiable. */
1488 if (outb->in_scount != bb->out_scount) {
1489 cfg->unverifiable = TRUE;
1492 continue; /* check they are the same locals */
1494 outb->in_scount = count;
1495 outb->in_stack = bb->out_stack;
1498 locals = bb->out_stack;
/* Spill each stack value into its temp and replace the stack entry with it. */
1500 for (i = 0; i < count; ++i) {
1501 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1502 inst->cil_code = sp [i]->cil_code;
1503 sp [i] = locals [i];
1504 if (cfg->verbose_level > 3)
1505 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1509 * It is possible that the out bblocks already have in_stack assigned, and
1510 * the in_stacks differ. In this case, we will store to all the different
1517 /* Find a bblock which has a different in_stack */
1519 while (bindex < bb->out_count) {
1520 outb = bb->out_bb [bindex];
1521 /* exception handlers are linked, but they should not be considered for stack args */
1522 if (outb->flags & BB_EXCEPTION_HANDLER) {
1526 if (outb->in_stack != locals) {
/* Emit extra stores for successors using a different variable set. */
1527 for (i = 0; i < count; ++i) {
1528 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1529 inst->cil_code = sp [i]->cil_code;
1530 sp [i] = locals [i];
1531 if (cfg->verbose_level > 3)
1532 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1534 locals = outb->in_stack;
1543 /* Emit code which loads interface_offsets [klass->interface_id]
1544  * The array is stored in memory before vtable.
/* Loads the interface offset for 'klass' into intf_reg, given a vtable pointer
 * in vtable_reg. Under AOT the (adjusted) interface id is a patchable constant. */
1547 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1549 if (cfg->compile_aot) {
1550 int ioffset_reg = alloc_preg (cfg);
1551 int iid_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1554 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the offset is a compile-time constant (negative: before the vtable). */
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Emit code that sets intf_bit_reg to non-zero iff the interface bitmap stored
 * at base_reg+offset has the bit for klass's interface id set.
 * Two layouts exist: a compressed bitmap (checked via a JIT icall) and a plain
 * byte-addressed bitmap (checked inline). */
1563 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1565 int ibitmap_reg = alloc_preg (cfg);
1566 #ifdef COMPRESSED_INTERFACE_BITMAP
1568 MonoInst *res, *ins;
1569 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1570 MONO_ADD_INS (cfg->cbb, ins);
/* The interface id is a patchable AOT constant or a plain immediate. */
1572 if (cfg->compile_aot)
1573 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1575 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1576 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1577 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1579 int ibitmap_byte_reg = alloc_preg (cfg);
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1583 if (cfg->compile_aot) {
/* AOT: the iid is not known at compile time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) at runtime. */
1584 int iid_reg = alloc_preg (cfg);
1585 int shifted_iid_reg = alloc_preg (cfg);
1586 int ibitmap_byte_address_reg = alloc_preg (cfg);
1587 int masked_iid_reg = alloc_preg (cfg);
1588 int iid_one_bit_reg = alloc_preg (cfg);
1589 int iid_bit_reg = alloc_preg (cfg);
1590 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1592 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1593 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1595 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1596 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1606  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1607  * stored in "klass_reg" implements the interface "klass".
1610 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Delegates to the bitmap check using the MonoClass's interface_bitmap field. */
1612 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1616  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1617  * stored in "vtable_reg" implements the interface "klass".
1620 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Delegates to the bitmap check using the MonoVTable's interface_bitmap field. */
1622 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1626  * Emit code which checks whether the interface id of @klass is smaller
1627  * than the value given by max_iid_reg.
/* On failure: branch to false_target if given, otherwise throw InvalidCastException. */
1630 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1631 MonoBasicBlock *false_target)
1633 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so use a patchable constant. */
1634 int iid_reg = alloc_preg (cfg);
1635 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1636 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1643 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1646 /* Same as above, but obtains max_iid from a vtable */
1648 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1649 MonoBasicBlock *false_target)
1651 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1654 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1657 /* Same as above, but obtains max_iid from a klass */
1659 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1660 MonoBasicBlock *false_target)
1662 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id and run the shared range check. */
1664 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1665 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style check: walk the supertypes table of the class in
 * klass_reg and branch to true_target if klass is found at the expected depth,
 * or to false_target when the hierarchy is too shallow.
 * klass_ins, when non-NULL, supplies the target class as a runtime value. */
1669 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1671 int idepth_reg = alloc_preg (cfg);
1672 int stypes_reg = alloc_preg (cfg);
1673 int stype = alloc_preg (cfg);
1675 mono_class_setup_supertypes (klass);
/* Only emit the depth check when the fixed-size supertable cannot cover it. */
1677 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [idepth - 1] and compare it against the target class. */
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1685 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1686 } else if (cfg->compile_aot) {
1687 int const_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1689 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: compare directly against the MonoClass* as an immediate. */
1691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with the target class known at compile time. */
1697 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1699 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast check against a vtable in vtable_reg:
 * range-check the interface id, test the interface bitmap bit, then either
 * branch to true_target or throw InvalidCastException. */
1703 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1705 int intf_reg = alloc_preg (cfg);
1707 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1708 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1713 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1717  * Variant of the above that takes a register to the class, not the vtable.
1720 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1722 int intf_bit_reg = alloc_preg (cfg);
/* Same structure as mini_emit_iface_cast, using MonoClass-based helpers. */
1724 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1725 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1730 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact-class check: compare klass_reg against the target class and
 * throw InvalidCastException on mismatch. The target comes from klass_inst
 * (runtime value), an AOT class constant, or a JIT immediate. */
1734 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1737 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1738 } else if (cfg->compile_aot) {
1739 int const_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1745 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time target class. */
1749 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1751 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg against klass and branch to 'target' using 'branch_op'
 * (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing. */
1755 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1757 if (cfg->compile_aot) {
1758 int const_reg = alloc_preg (cfg);
1759 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1760 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1762 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1768 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check for the class in klass_reg against 'klass',
 * throwing InvalidCastException on failure. Handles the array case
 * (rank + element-class checks, with special-casing for enums/object)
 * and the plain class case (supertypes-table walk).
 * obj_reg == -1 skips the vector (bounds == NULL) check for nested arrays. */
1771 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1774 int rank_reg = alloc_preg (cfg);
1775 int eclass_reg = alloc_preg (cfg);
1777 g_assert (!klass_inst);
/* Array case: the rank must match exactly. */
1778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1780 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1781 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element class checks, special-cased around enums and System.Object. */
1783 if (klass->cast_class == mono_defaults.object_class) {
1784 int parent_reg = alloc_preg (cfg);
1785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1786 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1787 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1788 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1789 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1790 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1791 } else if (klass->cast_class == mono_defaults.enum_class) {
1792 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1793 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1794 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1796 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1797 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1800 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1801 /* Check that the object is a vector too */
1802 int bounds_reg = alloc_preg (cfg);
1803 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1804 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1805 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table, same layout as the isinst path. */
1808 int idepth_reg = alloc_preg (cfg);
1809 int stypes_reg = alloc_preg (cfg);
1810 int stype = alloc_preg (cfg);
1812 mono_class_setup_supertypes (klass);
1814 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1815 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1817 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1820 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1821 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time target class. */
1826 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1828 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit IR to fill [destreg+offset, destreg+offset+size) with 'val'.
 * Only val == 0 is supported. Small aligned sizes use immediate stores;
 * larger or unaligned regions fall back to register stores, widest first. */
1832 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1836 g_assert (val == 0);
/* Fast path: a single register-sized, sufficiently aligned store. */
1841 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1844 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1847 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1852 #if SIZEOF_REGISTER == 8
1854 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1860 val_reg = alloc_preg (cfg);
1862 if (SIZEOF_REGISTER == 8)
1863 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1865 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix handled one byte at a time. */
1868 /* This could be optimized further if necessary */
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1877 #if !NO_UNALIGNED_ACCESS
1878 if (SIZEOF_REGISTER == 8) {
1880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1893 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1903 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit IR to copy 'size' bytes from srcreg+soffset to destreg+doffset,
 * using the widest load/store pairs permitted by 'align'. */
1910 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1917 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1918 g_assert (size < 10000);
/* Unaligned prefix handled one byte at a time. */
1921 /* This could be optimized further if necessary */
1923 cur_reg = alloc_preg (cfg);
1924 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1932 #if !NO_UNALIGNED_ACCESS
/* 8-byte copies where unaligned access is allowed and registers are 64-bit. */
1933 if (SIZEOF_REGISTER == 8) {
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1946 cur_reg = alloc_preg (cfg);
1947 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1948 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1954 cur_reg = alloc_preg (cfg);
1955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1962 cur_reg = alloc_preg (cfg);
1963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Emit IR to store sreg1 into the TLS slot identified by tls_key.
 * Under AOT the slot offset is a patchable constant (OP_TLS_SET_REG);
 * otherwise it is resolved now via mini_get_tls_offset (). */
1972 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1976 if (cfg->compile_aot) {
1977 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1980 ins->sreg2 = c->dreg;
1981 MONO_ADD_INS (cfg->cbb, ins);
1983 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1985 ins->inst_offset = mini_get_tls_offset (tls_key);
1986 MONO_ADD_INS (cfg->cbb, ins);
1993  * Emit IR to push the current LMF onto the LMF stack.
1996 emit_push_lmf (MonoCompile *cfg)
1999 * Emit IR to push the LMF:
2000 * lmf_addr = <lmf_addr from tls>
2001 * lmf->lmf_addr = lmf_addr
2002 * lmf->prev_lmf = *lmf_addr
2005 int lmf_reg, prev_lmf_reg;
2006 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so link via the TLS slot directly. */
2011 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2012 /* Load current lmf */
2013 lmf_ins = mono_get_lmf_intrinsic (cfg);
2015 MONO_ADD_INS (cfg->cbb, lmf_ins);
2016 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2017 lmf_reg = ins->dreg;
2018 /* Save previous_lmf */
2019 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make our LMF the current one. */
2021 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2024 * Store lmf_addr in a variable, so it can be allocated to a global register.
2026 if (!cfg->lmf_addr_var)
2027 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Compute lmf_addr from jit_tls when the intrinsic is available. */
2030 ins = mono_get_jit_tls_intrinsic (cfg);
2032 int jit_tls_dreg = ins->dreg;
2034 MONO_ADD_INS (cfg->cbb, ins);
2035 lmf_reg = alloc_preg (cfg);
2036 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2038 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2041 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2043 MONO_ADD_INS (cfg->cbb, lmf_ins);
2046 MonoInst *args [16], *jit_tls_ins, *ins;
2048 /* Inline mono_get_lmf_addr () */
2049 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2051 /* Load mono_jit_tls_id */
2052 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2053 /* call pthread_getspecific () */
2054 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2055 /* lmf_addr = &jit_tls->lmf */
2056 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Last resort: plain icall to fetch lmf_addr. */
2059 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2063 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2065 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2066 lmf_reg = ins->dreg;
/* Link our LMF into the chain: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2068 prev_lmf_reg = alloc_preg (cfg);
2069 /* Save previous_lmf */
2070 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2071 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2073 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2080  * Emit IR to pop the current LMF from the LMF stack.
2083 emit_pop_lmf (MonoCompile *cfg)
2085 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2091 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2092 lmf_reg = ins->dreg;
/* TLS-resident LMF: restore the previous LMF straight into the TLS slot. */
2094 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2095 /* Load previous_lmf */
2096 prev_lmf_reg = alloc_preg (cfg);
2097 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2099 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2102 * Emit IR to pop the LMF:
2103 * *(lmf->lmf_addr) = lmf->prev_lmf
2105 /* This could be called before emit_push_lmf () */
2106 if (!cfg->lmf_addr_var)
2107 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2108 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2110 prev_lmf_reg = alloc_preg (cfg);
2111 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2112 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave instrumentation call ('func') passing the
 * current method, when enter/leave profiling is enabled. */
2117 emit_instrumentation_call (MonoCompile *cfg, void *func)
2119 MonoInst *iargs [1];
2122 * Avoid instrumenting inlined methods since it can
2123 * distort profiling results.
2125 if (cfg->method != cfg->current_method)
2128 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2129 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2130 mono_emit_jit_icall (cfg, func, iargs);
/* Map a return type to the matching call opcode family.
 * calli selects the *_REG variant (indirect call through a register),
 * virt selects *_MEMBASE (vtable call), otherwise the plain direct form. */
2135 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2138 type = mini_get_underlying_type (cfg, type);
2139 switch (type->type) {
2140 case MONO_TYPE_VOID:
2141 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2148 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_FNPTR:
2153 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2154 case MONO_TYPE_CLASS:
2155 case MONO_TYPE_STRING:
2156 case MONO_TYPE_OBJECT:
2157 case MONO_TYPE_SZARRAY:
2158 case MONO_TYPE_ARRAY:
2159 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2162 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2165 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2167 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2169 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2170 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base type and are re-dispatched. */
2171 if (type->data.klass->enumtype) {
2172 type = mono_class_enum_basetype (type->data.klass);
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2176 case MONO_TYPE_TYPEDBYREF:
2177 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2178 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2179 type = &type->data.generic_class->container_class->byval_arg;
2182 case MONO_TYPE_MVAR:
/* Gsharedvt: type variables are handled as value types here. */
2184 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2186 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2192  * target_type_is_incompatible:
2193  * @cfg: MonoCompile context
2195  * Check that the item @arg on the evaluation stack can be stored
2196  * in the target type (can be a local, or field, etc).
2197  * The cfg arg can be used to check if we need verification or just
2200  * Returns: non-0 value if arg can't be stored on a target.
2203 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2205 MonoType *simple_type;
2208 if (target->byref) {
2209 /* FIXME: check that the pointed to types match */
2210 if (arg->type == STACK_MP)
2211 return arg->klass != mono_class_from_mono_type (target);
2212 if (arg->type == STACK_PTR)
/* Resolve enums/generic variables to the underlying form, then match stack types. */
2217 simple_type = mini_get_underlying_type (cfg, target);
2218 switch (simple_type->type) {
2219 case MONO_TYPE_VOID:
2227 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2231 /* STACK_MP is needed when setting pinned locals */
2232 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2237 case MONO_TYPE_FNPTR:
2239 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2240 * in native int. (#688008).
2242 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2245 case MONO_TYPE_CLASS:
2246 case MONO_TYPE_STRING:
2247 case MONO_TYPE_OBJECT:
2248 case MONO_TYPE_SZARRAY:
2249 case MONO_TYPE_ARRAY:
2250 if (arg->type != STACK_OBJ)
2252 /* FIXME: check type compatibility */
2256 if (arg->type != STACK_I8)
2260 if (arg->type != cfg->r4_stack_type)
2264 if (arg->type != STACK_R8)
2267 case MONO_TYPE_VALUETYPE:
/* Value types must match the stack entry's exact class. */
2268 if (arg->type != STACK_VTYPE)
2270 klass = mono_class_from_mono_type (simple_type);
2271 if (klass != arg->klass)
2274 case MONO_TYPE_TYPEDBYREF:
2275 if (arg->type != STACK_VTYPE)
2277 klass = mono_class_from_mono_type (simple_type);
2278 if (klass != arg->klass)
2281 case MONO_TYPE_GENERICINST:
2282 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2283 if (arg->type != STACK_VTYPE)
2285 klass = mono_class_from_mono_type (simple_type);
2286 if (klass != arg->klass)
2290 if (arg->type != STACK_OBJ)
2292 /* FIXME: check type compatibility */
2296 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing. */
2297 g_assert (cfg->generic_sharing_context);
2298 if (mini_type_var_is_vt (cfg, simple_type)) {
2299 if (arg->type != STACK_VTYPE)
2302 if (arg->type != STACK_OBJ)
2307 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2313  * Prepare arguments for passing to a function call.
2314  * Return a non-zero value if the arguments can't be passed to the given
2316  * The type checks are not yet complete and some conversions may need
2317  * casts on 32 or 64 bit architectures.
2319  * FIXME: implement this using target_type_is_incompatible ()
2322 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2324 MonoType *simple_type;
/* Instance calls: args [0] is 'this' and must be a reference or pointer. */
2328 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2332 for (i = 0; i < sig->param_count; ++i) {
2333 if (sig->params [i]->byref) {
2334 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2338 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
/* handle_enum: re-dispatch after enum/genericinst reduction. */
2340 switch (simple_type->type) {
2341 case MONO_TYPE_VOID:
2350 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2356 case MONO_TYPE_FNPTR:
2357 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2360 case MONO_TYPE_CLASS:
2361 case MONO_TYPE_STRING:
2362 case MONO_TYPE_OBJECT:
2363 case MONO_TYPE_SZARRAY:
2364 case MONO_TYPE_ARRAY:
2365 if (args [i]->type != STACK_OBJ)
2370 if (args [i]->type != STACK_I8)
2374 if (args [i]->type != cfg->r4_stack_type)
2378 if (args [i]->type != STACK_R8)
2381 case MONO_TYPE_VALUETYPE:
2382 if (simple_type->data.klass->enumtype) {
2383 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2386 if (args [i]->type != STACK_VTYPE)
2389 case MONO_TYPE_TYPEDBYREF:
2390 if (args [i]->type != STACK_VTYPE)
2393 case MONO_TYPE_GENERICINST:
2394 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2397 case MONO_TYPE_MVAR:
/* Gsharedvt: type variables are treated as vtypes here. */
2399 if (args [i]->type != STACK_VTYPE)
2403 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart. */
2411 callvirt_to_call (int opcode)
2414 case OP_CALL_MEMBASE:
2416 case OP_VOIDCALL_MEMBASE:
2418 case OP_FCALL_MEMBASE:
2420 case OP_RCALL_MEMBASE:
2422 case OP_VCALL_MEMBASE:
2424 case OP_LCALL_MEMBASE:
/* Non-MEMBASE opcodes must never be passed in. */
2427 g_assert_not_reached ();
2433 /* Either METHOD or IMT_ARG needs to be set */
/* Emit IR to pass the IMT/method argument for an interface or generic-virtual
 * call: either an explicit imt_arg value or a (possibly AOT-patchable)
 * MonoMethod* constant, placed in the architecture's IMT register. */
2435 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM backend: the IMT value is recorded on the call and kept alive. */
2439 if (COMPILE_LLVM (cfg)) {
2440 method_reg = alloc_preg (cfg);
2443 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2444 } else if (cfg->compile_aot) {
2445 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2448 MONO_INST_NEW (cfg, ins, OP_PCONST);
2449 ins->inst_p0 = method;
2450 ins->dreg = method_reg;
2451 MONO_ADD_INS (cfg->cbb, ins);
2455 call->imt_arg_reg = method_reg;
2457 #ifdef MONO_ARCH_IMT_REG
2458 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2460 /* Need this to keep the IMT arg alive */
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: load the value and bind it to the dedicated IMT register. */
2466 #ifdef MONO_ARCH_IMT_REG
2467 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2471 } else if (cfg->compile_aot) {
2472 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2475 MONO_INST_NEW (cfg, ins, OP_PCONST);
2476 ins->inst_p0 = method;
2477 ins->dreg = method_reg;
2478 MONO_ADD_INS (cfg->cbb, ins);
2481 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Architectures without an IMT register delegate to an arch-specific hook. */
2483 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2487 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from the mempool MP and initialize it with
 * IP, TYPE and TARGET.  The returned structure lives as long as MP.
 */
2488 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2490 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2494 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 *   Return the generic-context usage flags of KLASS when generic sharing is
 * active for this compile; only meaningful under generic sharing.
 */
2500 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2502 if (cfg->generic_sharing_context)
2503 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 *   Same as mini_class_check_context_used () but for METHOD: report how
 * METHOD uses its generic context, gated on generic sharing being enabled.
 */
2509 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2511 if (cfg->generic_sharing_context)
2512 return mono_method_check_context_used (method);
2518 * check_method_sharing:
2520 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Results are returned through the optional OUT_PASS_VTABLE/OUT_PASS_MRGCTX
 * pointers; either may be NULL if the caller does not care. */
2523 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2525 gboolean pass_vtable = FALSE;
2526 gboolean pass_mrgctx = FALSE;
/* Static methods and valuetype methods on generic classes may need the
 * vtable as a hidden argument when the target could be shared code. */
2528 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2529 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2530 gboolean sharable = FALSE;
2532 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2535 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2536 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2537 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2539 sharable = sharing_enabled && context_sharable;
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) get an mrgctx instead of a
 * vtable; the two are mutually exclusive (asserted below). */
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2560 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2561 MonoGenericContext *context = mini_method_get_context (cmethod);
2562 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2564 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always need the mrgctx. */
2566 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2571 if (out_pass_vtable)
2572 *out_pass_vtable = pass_vtable;
2573 if (out_pass_mrgctx)
2574 *out_pass_mrgctx = pass_mrgctx;
2577 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the call flavor.  Handles
 * vtype returns (via OP_OUTARG_VTRETADDR or cfg->vret_addr), soft-float R4
 * argument conversion, and lets the arch/LLVM backend lower the arguments.
 * The call instruction is NOT added to a basic block here; callers do that.
 */
2578 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2579 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2583 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler leave event first. */
2588 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2590 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2592 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2595 call->signature = sig;
2596 call->rgctx_reg = rgctx;
2597 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2599 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: route the result through cfg->vret_addr ... */
2602 if (mini_type_is_vtype (cfg, sig_ret)) {
2603 call->vret_var = cfg->vret_addr;
2604 //g_assert_not_reached ();
/* ... or through a fresh local whose address is passed to the callee. */
2606 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2607 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2610 temp->backend.is_pinvoke = sig->pinvoke;
2613 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2614 * address of return value to increase optimization opportunities.
2615 * Before vtype decomposition, the dreg of the call ins itself represents the
2616 * fact the call modifies the return value. After decomposition, the call will
2617 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2618 * will be transformed into an LDADDR.
2620 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2621 loada->dreg = alloc_preg (cfg);
2622 loada->inst_p0 = temp;
2623 /* We reference the call too since call->dreg could change during optimization */
2624 loada->inst_p1 = call;
2625 MONO_ADD_INS (cfg->cbb, loada);
2627 call->inst.dreg = temp->dreg;
2629 call->vret_var = loada;
2631 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2631 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2633 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2634 if (COMPILE_SOFT_FLOAT (cfg)) {
2636 * If the call has a float argument, we would need to do an r8->r4 conversion using
2637 * an icall, but that cannot be done during the call sequence since it would clobber
2638 * the call registers + the stack. So we do it before emitting the call.
2640 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2642 MonoInst *in = call->args [i];
2644 if (i >= sig->hasthis)
2645 t = sig->params [i - sig->hasthis];
2647 t = &mono_defaults.int_class->byval_arg;
2648 t = mono_type_get_underlying_type (t);
2650 if (!t->byref && t->type == MONO_TYPE_R4) {
2651 MonoInst *iargs [1];
2655 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2657 /* The result will be in an int vreg */
2658 call->args [i] = conv;
2664 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the outgoing arguments with the appropriate backend. */
2667 if (COMPILE_LLVM (cfg))
2668 mono_llvm_emit_call (cfg, call);
2670 mono_arch_emit_call (cfg, call);
2672 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area and flag that this method calls. */
2675 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2676 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument in RGCTX_REG to CALL.  On archs with a
 * dedicated MONO_ARCH_RGCTX_REG the value is bound to that register;
 * otherwise the vreg is recorded in call->rgctx_arg_reg.
 */
2682 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2684 #ifdef MONO_ARCH_RGCTX_REG
2685 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2686 cfg->uses_rgctx_reg = TRUE;
2687 call->rgctx_reg = TRUE;
2689 call->rgctx_arg_reg = rgctx_reg;
2696 inline static MonoInst*
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG, when non-NULL, are attached as hidden arguments.
 * When pinvoke callconv checking is enabled, the stack pointer is saved
 * before the call and compared after it, throwing ExecutionEngineException
 * on an imbalance.  Returns the call instruction cast to MonoInst*.
 */
2697 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2702 gboolean check_sp = FALSE;
2704 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2705 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
/* Only verify the SP for real pinvoke wrappers. */
2707 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg before the arg-lowering clobbers it. */
2712 rgctx_reg = mono_alloc_preg (cfg);
2713 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2717 if (!cfg->stack_inbalance_var)
2718 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record the SP before the call so it can be compared afterwards. */
2720 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2721 ins->dreg = cfg->stack_inbalance_var->dreg;
2722 MONO_ADD_INS (cfg->cbb, ins);
2725 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2727 call->inst.sreg1 = addr->dreg;
2730 emit_imt_argument (cfg, call, NULL, imt_arg);
2732 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Read the SP again after the call for the imbalance check. */
2737 sp_reg = mono_alloc_preg (cfg);
2739 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2741 MONO_ADD_INS (cfg->cbb, ins);
2743 /* Restore the stack so we don't crash when throwing the exception */
2744 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2745 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2746 MONO_ADD_INS (cfg->cbb, ins);
2748 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2753 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2755 return (MonoInst*)call;
2759 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2762 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2764 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) call to METHOD with signature SIG and
 * arguments ARGS.  THIS being non-NULL makes the call virtual; IMT_ARG and
 * RGCTX_ARG are optional hidden arguments.  Handles remoting wrappers,
 * string ctors, delegate Invoke fast paths, devirtualization of final/
 * non-virtual methods, IMT-based interface dispatch and vtable dispatch.
 * Returns the call instruction as a MonoInst*.
 */
2767 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2768 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2770 #ifndef DISABLE_REMOTING
2771 gboolean might_be_remote = FALSE;
2773 gboolean virtual = this != NULL;
2774 gboolean enable_for_aot = TRUE;
2778 gboolean need_unbox_trampoline;
2781 sig = mono_method_signature (method);
/* Preserve the rgctx value across argument lowering. */
2784 rgctx_reg = mono_alloc_preg (cfg);
2785 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2788 if (method->string_ctor) {
2789 /* Create the real signature */
2790 /* FIXME: Cache these */
2791 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
/* String ctors actually return the string instance. */
2792 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2797 context_used = mini_method_check_context_used (cfg, method);
2799 #ifndef DISABLE_REMOTING
2800 might_be_remote = this && sig->hasthis &&
2801 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2802 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2804 if (might_be_remote && context_used) {
/* Shared code resolves the remoting-check wrapper through the rgctx. */
2807 g_assert (cfg->generic_sharing_context);
2809 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2811 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this->dreg;
/* Fast path: direct call through delegate->invoke_impl for Invoke. */
2835 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, or final methods, just need a null check. */
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null this). */
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2897 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: dispatch through the IMT slot (negative offsets
 * before the vtable proper). */
2900 guint32 imt_slot = mono_method_get_imt_slot (method);
2901 emit_imt_argument (cfg, call, call->method, imt_arg);
2902 slot_reg = vtable_reg;
2903 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2905 if (slot_reg == -1) {
2906 slot_reg = alloc_preg (cfg);
2907 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2908 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index into the vtable slots. */
2911 slot_reg = vtable_reg;
2912 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2913 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2915 g_assert (mono_method_signature (method)->generic_param_count);
2916 emit_imt_argument (cfg, call, call->method, imt_arg);
2920 call->inst.sreg1 = slot_reg;
2921 call->inst.inst_offset = offset;
2922 call->virtual = TRUE;
2926 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2929 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2931 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_method_call_full () for plain,
 * non-tail calls with no IMT or rgctx arguments.
 */
2935 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2937 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG and
 * arguments ARGS, and add it to the current basic block.
 */
2941 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2948 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2951 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2953 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its native address FUNC.
 * The icall must have been registered; its wrapper and signature are looked
 * up from the icall table.
 */
2957 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2959 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2963 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2967 * mono_emit_abs_call:
2969 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2971 inline static MonoInst*
2972 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2973 MonoMethodSignature *sig, MonoInst **args)
2975 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2979 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The abs_patches table maps the fake address (ji) back to the patch info. */
2982 if (cfg->abs_patches == NULL)
2983 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2984 g_hash_table_insert (cfg->abs_patches, ji, ji);
2985 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark fptr as a patch so later passes don't treat it as a real address. */
2986 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *
 *   Return whether icalls may be called directly (without their managed
 * wrapper).  Disabled under LLVM on some targets, when sequence-point debug
 * data is generated, or when explicitly turned off.
 */
2991 direct_icalls_enabled (MonoCompile *cfg)
2993 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2995 if (cfg->compile_llvm)
2998 if (cfg->gen_seq_points_debug_data || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO.  When the icall cannot
 * raise and direct icalls are enabled, the (lazily created) wrapper method
 * is inlined instead of called, saving the wrapper call overhead.
 * OUT_CBB receives the basic block the inlined code ends in.
 */
3004 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3007 * Call the jit icall without a wrapper if possible.
3008 * The wrapper is needed for the following reasons:
3009 * - to handle exceptions thrown using mono_raise_exceptions () from the
3010 * icall function. The EH code needs the lmf frame pushed by the
3011 * wrapper to be able to unwind back to managed code.
3012 * - to be able to do stack walks for asynchronously suspended
3013 * threads when debugging.
3015 if (info->no_raise && direct_icalls_enabled (cfg)) {
3019 if (!info->wrapper_method) {
/* Lazily create and cache the icall wrapper. */
3020 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3021 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
3023 mono_memory_barrier ();
3027 * Inline the wrapper method, which is basically a call to the C icall, and
3028 * an exception check.
3030 costs = inline_method (cfg, info->wrapper_method, NULL,
3031 args, NULL, cfg->real_offset, TRUE, out_cbb);
3032 g_assert (costs > 0);
3033 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: normal call through the wrapper. */
3037 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   If the call INS returns a small integer (i1/u1/i2/u2) from native code
 * (pinvoke) or LLVM-compiled code, emit an explicit sign/zero extension,
 * since the callee may not have initialized the upper bits of the register.
 * Returns the (possibly new) result instruction.
 */
3042 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3044 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3045 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3049 * Native code might return non register sized integers
3050 * without initializing the upper bits.
3052 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3053 case OP_LOADI1_MEMBASE:
3054 widen_op = OP_ICONV_TO_I1;
3056 case OP_LOADU1_MEMBASE:
3057 widen_op = OP_ICONV_TO_U1;
3059 case OP_LOADI2_MEMBASE:
3060 widen_op = OP_ICONV_TO_I2;
3062 case OP_LOADU2_MEMBASE:
3063 widen_op = OP_ICONV_TO_U2;
/* widen_op stays -1 for register-sized or non-integer returns. */
3069 if (widen_op != -1) {
3070 int dreg = alloc_preg (cfg);
3073 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3074 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache) the managed String.memcpy helper used for value
 * copies.  Aborts if the helper is missing (too-old corlib).
 */
3084 get_memcpy_method (void)
3086 static MonoMethod *memcpy_method = NULL;
3087 if (!memcpy_method) {
3088 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3090 g_error ("Old corlib found. Install a new one");
3092 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build a bitmap in *WB_BITMAP with one bit per pointer-sized slot of
 * KLASS (starting at byte OFFSET) that holds a GC reference.  Recurses
 * into embedded valuetypes that contain references; static fields are
 * skipped.
 */
3096 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3098 MonoClassField *field;
3099 gpointer iter = NULL;
3101 while ((field = mono_class_get_fields (klass, &iter))) {
3104 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
3106 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3107 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the bitmap to work. */
3108 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3109 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3111 MonoClass *field_class = mono_class_from_mono_type (field->type);
3112 if (field_class->has_references)
3113 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.  Picks the
 * cheapest available strategy: the arch's OP_CARD_TABLE_WBARRIER, an inline
 * card-table mark, or a call to the generic GC write-barrier method.
 * No-op when write barriers are disabled.
 */
3119 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3121 int card_table_shift_bits;
3122 gpointer card_table_mask;
3124 MonoInst *dummy_use;
3125 int nursery_shift_bits;
3126 size_t nursery_size;
3127 gboolean has_card_table_wb = FALSE;
3129 if (!cfg->gen_write_barriers)
3132 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3134 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3136 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3137 has_card_table_wb = TRUE;
/* Best case: single arch-specific card-table barrier instruction. */
3140 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3143 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3144 wbarrier->sreg1 = ptr->dreg;
3145 wbarrier->sreg2 = value->dreg;
3146 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
3147 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3148 int offset_reg = alloc_preg (cfg);
3149 int card_reg = alloc_preg (cfg);
3152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3153 if (card_table_mask)
3154 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3156 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3157 * IMM's larger than 32bits.
3159 if (cfg->compile_aot) {
3160 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3162 MONO_INST_NEW (cfg, ins, OP_PCONST);
3163 ins->inst_p0 = card_table;
3164 ins->dreg = card_reg;
3165 MONO_ADD_INS (cfg->cbb, ins);
3168 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3169 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the generic managed write-barrier helper. */
3171 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3172 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive until after the barrier. */
3175 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Copy SIZE bytes of valuetype KLASS from iargs[1] to iargs[0], emitting
 * GC write barriers for reference slots.  Small copies are unrolled with a
 * per-slot barrier; larger ones call the mono_gc_wbarrier_value_copy_bitmap
 * icall with a bitmap of reference slots.  Returns FALSE (letting the
 * caller fall back) when alignment or size make the fast paths unsafe.
 */
3179 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3181 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3182 unsigned need_wb = 0;
3187 /*types with references can't have alignment smaller than sizeof(void*) */
3188 if (align < SIZEOF_VOID_P)
3191 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3192 if (size > 32 * SIZEOF_VOID_P)
3195 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3197 /* We don't unroll more than 5 stores to avoid code bloat. */
3198 if (size > 5 * SIZEOF_VOID_P) {
3199 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3200 size += (SIZEOF_VOID_P - 1);
3201 size &= ~(SIZEOF_VOID_P - 1);
3203 EMIT_NEW_ICONST (cfg, iargs [2], size);
3204 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3205 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* Unrolled path: copy pointer-sized words, barrier where needed. */
3209 destreg = iargs [0]->dreg;
3210 srcreg = iargs [1]->dreg;
3213 dest_ptr_reg = alloc_preg (cfg);
3214 tmp_reg = alloc_preg (cfg);
3217 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3219 while (size >= SIZEOF_VOID_P) {
3220 MonoInst *load_inst;
3221 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3222 load_inst->dreg = tmp_reg;
3223 load_inst->inst_basereg = srcreg;
3224 load_inst->inst_offset = offset;
3225 MONO_ADD_INS (cfg->cbb, load_inst);
3227 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marks as references. */
3230 emit_write_barrier (cfg, iargs [0], load_inst);
3232 offset += SIZEOF_VOID_P;
3233 size -= SIZEOF_VOID_P;
3236 /*tmp += sizeof (void*)*/
3237 if (size >= SIZEOF_VOID_P) {
3238 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3239 MONO_ADD_INS (cfg->cbb, iargs [0]);
3243 /* Those cannot be references since size < sizeof (void*) */
3245 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3246 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3252 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3253 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3259 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3260 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3269 * Emit code to copy a valuetype of type @klass whose address is stored in
3270 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects native (marshalled) layout/size; write barriers are only
 * emitted for managed layout with reference fields.  Handles gsharedvt
 * classes whose size is only known at runtime via rgctx fetches. */
3273 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3275 MonoInst *iargs [4];
3278 MonoMethod *memcpy_method;
3279 MonoInst *size_ins = NULL;
3280 MonoInst *memcpy_ins = NULL;
3283 if (cfg->generic_sharing_context)
3284 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3287 * This check breaks with spilled vars... need to handle it during verification anyway.
3288 * g_assert (klass && klass == src->klass && klass == dest->klass);
3291 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size/memcpy helper are runtime values for gsharedvt types. */
3293 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3294 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3298 n = mono_class_native_size (klass, &align);
3300 n = mono_class_value_size (klass, &align);
3302 /* if native is true there should be no references in the struct */
3303 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3304 /* Avoid barriers when storing to the stack */
3305 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3306 (dest->opcode == OP_LDADDR))) {
3312 context_used = mini_class_check_context_used (cfg, klass);
3314 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3315 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3317 } else if (context_used) {
3318 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3320 if (cfg->compile_aot) {
3321 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3323 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* The value-copy icall needs the GC descriptor computed. */
3324 mono_class_compute_gc_descriptor (klass);
3329 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3331 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references: plain memcpy, inlined when small enough. */
3336 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3337 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3338 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3343 iargs [2] = size_ins;
3345 EMIT_NEW_ICONST (cfg, iargs [2], n);
3347 memcpy_method = get_memcpy_method ();
3349 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3351 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache) the managed String.memset helper used for valuetype
 * initialization.  Aborts if the helper is missing (too-old corlib).
 */
3356 get_memset_method (void)
3358 static MonoMethod *memset_method = NULL;
3359 if (!memset_method) {
3360 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3362 g_error ("Old corlib found. Install a new one");
3364 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at address
 * DEST->dreg.  gsharedvt types use a runtime bzero helper (size known only
 * at runtime); small types are inlined; the rest call the managed memset.
 */
3368 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3370 MonoInst *iargs [3];
3373 MonoMethod *memset_method;
3374 MonoInst *size_ins = NULL;
3375 MonoInst *bzero_ins = NULL;
3376 static MonoMethod *bzero_method;
3378 /* FIXME: Optimize this for the case when dest is an LDADDR */
3379 mono_class_init (klass);
3380 if (mini_is_gsharedvt_klass (cfg, klass)) {
3381 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3382 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3384 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3385 g_assert (bzero_method);
3387 iargs [1] = size_ins;
/* Indirect call: the bzero address comes from the gsharedvt info. */
3388 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3392 n = mono_class_value_size (klass, &align);
/* Small types: inline the memset. */
3394 if (n <= sizeof (gpointer) * 8) {
3395 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3398 memset_method = get_memset_method ();
3400 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3401 EMIT_NEW_ICONST (cfg, iargs [2], n);
3402 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  Depending on
 * how the context is passed, this is the mrgctx variable, the vtable
 * variable, or derived from the 'this' argument's vtable.  Only valid
 * under generic sharing (asserted).
 */
3407 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3409 MonoInst *this = NULL;
3411 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types can reach the context via 'this'. */
3413 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3414 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3415 !method->klass->valuetype)
3416 EMIT_NEW_ARGLOAD (cfg, this, 0);
3418 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3419 MonoInst *mrgctx_loc, *mrgctx_var;
/* Generic methods carry a method rgctx (mrgctx). */
3422 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3424 mrgctx_loc = mono_get_vtable_var (cfg);
3425 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3428 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3429 MonoInst *vtable_loc, *vtable_var;
/* Static/valuetype methods get the vtable as the hidden argument. */
3433 vtable_loc = mono_get_vtable_var (cfg);
3434 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3436 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3437 MonoInst *mrgctx_var = vtable_var;
/* The hidden arg is actually an mrgctx; load the vtable out of it. */
3440 vtable_reg = alloc_preg (cfg);
3441 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3442 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from 'this'. */
3450 vtable_reg = alloc_preg (cfg);
3451 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3456 static MonoJumpInfoRgctxEntry *
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor: the method it
 * belongs to, whether the entry lives in an mrgctx, the wrapped patch
 * (PATCH_TYPE/PATCH_DATA) and the rgctx info type to fetch.
 */
3457 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3459 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3460 res->method = method;
3461 res->in_mrgctx = in_mrgctx;
3462 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3463 res->data->type = patch_type;
3464 res->data->data.target = patch_data;
3465 res->info_type = info_type;
3470 static inline MonoInst*
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * against the runtime generic context value RGCTX.
 */
3471 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3473 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE slot for KLASS from the current
 * method's runtime generic context.
 */
3477 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3478 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3480 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3481 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3483 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE slot for signature SIG from the
 * current method's runtime generic context.
 */
3487 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3488 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3490 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3491 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3493 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch the RGCTX_TYPE slot describing a gsharedvt call to
 * CMETHOD with signature SIG from the runtime generic context.
 */
3497 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3498 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3500 MonoJumpInfoGSharedVtCall *call_info;
3501 MonoJumpInfoRgctxEntry *entry;
3504 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3505 call_info->sig = sig;
3506 call_info->method = cmethod;
3508 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3509 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3511 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO slot for
 * CMETHOD (described by INFO) from the runtime generic context.
 */
3516 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3517 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3519 MonoJumpInfoRgctxEntry *entry;
3522 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3523 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3525 return emit_rgctx_fetch (cfg, rgctx, entry);
3529 * emit_get_rgctx_method:
3531 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3532 * normal constants, else emit a load from the rgctx.
3535 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3536 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3538 if (!context_used) {
/* Not shared: the value is a compile-time constant. */
3541 switch (rgctx_type) {
3542 case MONO_RGCTX_INFO_METHOD:
3543 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3545 case MONO_RGCTX_INFO_METHOD_RGCTX:
3546 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types have no constant form. */
3549 g_assert_not_reached ();
3552 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3553 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3555 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE slot for FIELD from the current
 * method's runtime generic context.
 */
3560 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3561 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3563 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3564 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3566 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot in cfg->gsharedvt_info->entries describing
 * (DATA, RGCTX_TYPE), registering a new entry if one does not exist yet.
 * Existing entries are reused, except for MONO_RGCTX_INFO_LOCAL_OFFSET,
 * which is never shared. The entries array grows geometrically (x2,
 * starting at 16) inside cfg->mempool.
 * NOTE(review): lossy extract — declarations of 'i'/'idx', the early return
 * on a match, and the final 'return idx;' are not visible here.
 */
3570 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3572 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3573 MonoRuntimeGenericContextInfoTemplate *template;
/* First look for an existing entry with the same type and data. */
3578 for (i = 0; i < info->num_entries; ++i) {
3579 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3581 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* No match: grow the array if it is full, then append a new entry. */
3585 if (info->num_entries == info->count_entries) {
3586 MonoRuntimeGenericContextInfoTemplate *new_entries;
3587 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3589 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3591 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3592 info->entries = new_entries;
3593 info->count_entries = new_count_entries;
3596 idx = info->num_entries;
3597 template = &info->entries [idx];
3598 template->info_type = rgctx_type;
3599 template->data = data;
3601 info->num_entries ++;
3607 * emit_get_gsharedvt_info:
3609 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * emit_get_gsharedvt_info:
 *
 *   Similar to emit_get_rgctx_.., but loads the data from the gsharedvt info
 * var (cfg->gsharedvt_info_var) instead of calling an rgctx fetch
 * trampoline: computes the slot index, then emits a single pointer load from
 * the runtime info's entries array at that index.
 * NOTE(review): lossy extract — declarations of 'idx'/'dreg'/'ins' and the
 * final return are not visible here.
 */
3612 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3617 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3618 /* Load info->entries [idx] */
3619 dreg = alloc_preg (cfg);
3620 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper around emit_get_gsharedvt_info () for class infos:
 * keys the gsharedvt info slot on KLASS's byval type.
 */
3626 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3628 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3632 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing its
 * vtable as the argument (loaded from the rgctx when the class context is
 * shared, otherwise as a vtable constant). On architectures defining
 * MONO_ARCH_VTABLE_REG the vtable is passed in that fixed register.
 * NOTE(review): lossy extract — the if/else structure around the
 * context_used branches and the trampoline call are not fully visible;
 * code kept verbatim.
 */
3635 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3637 MonoInst *vtable_arg;
3641 context_used = mini_class_check_context_used (cfg, klass);
/* Shared context: fetch the vtable from the runtime generic context. */
3644 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3645 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: embed the vtable pointer directly. */
3647 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3651 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3654 if (COMPILE_LLVM (cfg))
3655 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3657 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3658 #ifdef MONO_ARCH_VTABLE_REG
3659 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3660 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (for debugger/single-stepping support) at native
 * offset IP only when sequence point generation is enabled and METHOD is the
 * method actually being compiled (not an inlined callee). 'nonempty_stack'
 * tags the point with MONO_INST_NONEMPTY_STACK.
 */
3667 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3671 if (cfg->gen_seq_points && cfg->method == method) {
3672 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3674 ins->flags |= MONO_INST_NONEMPTY_STACK;
3675 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When the --debug=casts option (better_cast_details) is on, emit IR that
 * records the source class (read from OBJ_REG's vtable) and the target
 * KLASS into the JIT TLS data (class_cast_from / class_cast_to), so that a
 * failing cast can produce a detailed exception message. With 'null_check'
 * the object is first tested against NULL and the recording skipped.
 * If 'out_bblock' is non-NULL it receives the current bblock on return.
 * NOTE(review): lossy extract — the declaration of 'tls_get', the
 * null_check guard and several braces are not visible here; kept verbatim.
 */
3680 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3682 if (mini_get_debug_options ()->better_cast_details) {
3683 int vtable_reg = alloc_preg (cfg);
3684 int klass_reg = alloc_preg (cfg);
3685 MonoBasicBlock *is_null_bb = NULL;
3687 int to_klass_reg, context_used;
3690 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping entirely for a null object. */
3692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3696 tls_get = mono_get_jit_tls_intrinsic (cfg);
3698 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3702 MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
3703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3708 context_used = mini_class_check_context_used (cfg, klass);
3710 MonoInst *class_ins;
/* Shared context: target class comes from the rgctx. */
3712 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3713 to_klass_reg = class_ins->dreg;
3715 to_klass_reg = alloc_preg (cfg);
3716 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3718 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3721 MONO_START_BB (cfg, is_null_bb);
3723 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): when better_cast_details is on,
 * clear the saved cast information in the JIT TLS data after the cast has
 * been emitted. Zeroing class_cast_from alone is sufficient to mark the
 * record invalid.
 */
3729 reset_cast_details (MonoCompile *cfg)
3731 /* Reset the variables holding the cast details */
3732 if (mini_get_debug_options ()->better_cast_details) {
3733 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3735 MONO_ADD_INS (cfg->cbb, tls_get);
3736 /* It is enough to reset the from field */
3737 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3742 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that checks OBJ is exactly of type ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * Strategy varies by compilation mode:
 *   - MONO_OPT_SHARED: compare obj->vtable->klass against the class
 *     (as a const, or a CLASSCONST under AOT);
 *   - shared generic context: compare vtables using an rgctx-loaded vtable;
 *   - otherwise: compare vtables directly against the known MonoVTable.
 * The vtable load uses the faulting variant, so a NULL obj traps there.
 * NOTE(review): lossy extract — declarations of 'vtable'/'vt_reg' and the
 * 'return;' on vtable-lookup failure are not visible; kept verbatim.
 */
3745 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3747 int vtable_reg = alloc_preg (cfg);
3750 context_used = mini_class_check_context_used (cfg, array_class);
3752 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
3754 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3756 if (cfg->opt & MONO_OPT_SHARED) {
3757 int class_reg = alloc_preg (cfg);
3758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3759 if (cfg->compile_aot) {
3760 int klass_reg = alloc_preg (cfg);
3761 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3762 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3766 } else if (context_used) {
3767 MonoInst *vtable_ins;
3769 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3770 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3772 if (cfg->compile_aot) {
3776 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3778 vt_reg = alloc_preg (cfg);
3779 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3780 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3783 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3789 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3791 reset_cast_details (cfg);
3795 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3796 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T> by calling the class's "Unbox" method on
 * VAL. With a shared generic context the method address is fetched from the
 * rgctx and invoked via calli; otherwise a direct call is emitted, passing
 * the vtable as an extra argument when check_method_sharing () says the
 * method needs one (pass_mrgctx is asserted to be false here).
 * NOTE(review): lossy extract — the context_used if/else braces are not
 * visible; kept verbatim.
 */
3799 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3801 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3804 MonoInst *rgctx, *addr;
3806 /* FIXME: What if the class is shared? We might not
3807 have to get the address of the method from the
3809 addr = emit_get_rgctx_method (cfg, context_used, method,
3810 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3812 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3814 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3816 gboolean pass_vtable, pass_mrgctx;
3817 MonoInst *rgctx_arg = NULL;
3819 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3820 g_assert (!pass_mrgctx);
3823 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3826 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3829 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the 'unbox' opcode on sp [0]: verify the boxed object's
 * rank is 0 and its element class matches KLASS (via a shared rgctx load of
 * the element class, or a direct class check when not shared), throwing
 * InvalidCastException on mismatch, then produce the address of the value
 * payload (object pointer + sizeof (MonoObject)) typed as STACK_MP.
 * NOTE(review): lossy extract — declarations of 'obj_reg'/'add', the
 * context_used branch structure and the final return are not visible;
 * kept verbatim.
 */
3834 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3838 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3839 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3840 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3841 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3843 obj_reg = sp [0]->dreg;
3844 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3845 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3847 /* FIXME: generics */
3848 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type. */
3851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3852 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3858 MonoInst *element_class;
3860 /* This assertion is from the unboxcast insn */
3861 g_assert (klass->rank == 0);
3863 element_class = emit_get_rgctx_klass (cfg, context_used,
3864 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3866 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3867 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3869 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3870 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3871 reset_cast_details (cfg);
/* Result is a managed pointer to the value stored after the object header. */
3874 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3875 MONO_ADD_INS (cfg->cbb, add);
3876 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete kind
 * (value type / reference type / nullable) is only known at runtime. The
 * box type is read from the gsharedvt info (MONO_RGCTX_INFO_CLASS_BOX_TYPE)
 * and dispatched on: 1 = reference (store the ref into a temporary and use
 * its address), 2 = nullable (calli the runtime-resolved Nullable unbox
 * helper), otherwise a plain value type (address = obj + header size).
 * All paths leave the payload address in addr_reg; the result is loaded
 * from there at the join block. '*out_cbb' receives the final bblock.
 * NOTE(review): lossy extract — declarations of 'addr_reg'/'dreg'/'ins',
 * args [0] setup and braces are not visible; kept verbatim.
 */
3883 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3885 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3886 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3890 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3896 args [1] = klass_inst;
/* Dynamic cast check before unboxing. */
3899 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3901 NEW_BBLOCK (cfg, is_ref_bb);
3902 NEW_BBLOCK (cfg, is_nullable_bb);
3903 NEW_BBLOCK (cfg, end_bb);
3904 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3909 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3911 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3912 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Plain value type: payload lives right after the object header. */
3916 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3917 MONO_ADD_INS (cfg->cbb, addr);
3919 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3922 MONO_START_BB (cfg, is_ref_bb);
3924 /* Save the ref to a temporary */
3925 dreg = alloc_ireg (cfg);
3926 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3927 addr->dreg = addr_reg;
3928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3929 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable<T>: call the runtime-resolved unbox helper via calli. */
3935 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3936 MonoInst *unbox_call;
3937 MonoMethodSignature *unbox_sig;
3939 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3940 unbox_sig->ret = &klass->byval_arg;
3941 unbox_sig->param_count = 1;
3942 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3943 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3945 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3946 addr->dreg = addr_reg;
3949 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3952 MONO_START_BB (cfg, end_bb);
/* Join: load the value through the address computed by whichever path ran. */
3955 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3957 *out_cbb = cfg->cbb;
3963 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS and returns the MonoInst
 * holding the new object. Chooses among several strategies:
 *   - shared generic context: fetch class/vtable from the rgctx and call
 *     mono_object_new / mono_object_new_specific, or a managed allocator
 *     when one is available and the instance size is known;
 *   - MONO_OPT_SHARED: pass the domain and class to mono_object_new;
 *   - AOT out-of-line corlib classes: call a specialized mscorlib helper
 *     keyed on the type token to avoid relocations;
 *   - default: resolve the vtable, prefer a managed allocator, otherwise
 *     call mono_class_get_allocation_ftn () (optionally passing the
 *     instance size in pointer-words when pass_lw is set).
 * Returns NULL and sets cfg->exception on vtable lookup failure.
 * NOTE(review): lossy extract — declarations of 'alloc_ftn'/'data'/
 * 'rgctx_info'/'pass_lw' and several braces are not visible; kept verbatim.
 */
3966 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3968 MonoInst *iargs [2];
3974 MonoInst *iargs [2];
3975 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3977 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3979 if (cfg->opt & MONO_OPT_SHARED)
3980 rgctx_info = MONO_RGCTX_INFO_KLASS;
3982 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3983 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3985 if (cfg->opt & MONO_OPT_SHARED) {
3986 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3988 alloc_ftn = mono_object_new;
3991 alloc_ftn = mono_object_new_specific;
3994 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3995 if (known_instance_size) {
3996 int size = mono_class_instance_size (klass);
3998 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4000 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4003 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4006 if (cfg->opt & MONO_OPT_SHARED) {
4007 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4008 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4010 alloc_ftn = mono_object_new;
4011 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4012 /* This happens often in argument checking code, eg. throw new FooException... */
4013 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4014 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4015 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4017 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4018 MonoMethod *managed_alloc = NULL;
/* Vtable lookup failed: report a TypeLoadException through the cfg. */
4022 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4023 cfg->exception_ptr = klass;
4027 #ifndef MONO_CROSS_COMPILE
4028 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4031 if (managed_alloc) {
4032 int size = mono_class_instance_size (klass);
4034 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4035 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4036 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4038 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in pointer-sized words. */
4040 guint32 lw = vtable->klass->instance_size;
4041 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4042 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4043 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4046 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4050 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4054 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL into a new object of KLASS, returning the boxed
 * MonoInst. Special cases:
 *   - Nullable<T>: call the class's "Box" method (via rgctx + calli when
 *     the context is shared, direct call otherwise);
 *   - gsharedvt klass: runtime dispatch on the box type
 *     (MONO_RGCTX_INFO_CLASS_BOX_TYPE): 1 = reference (reload the ref from
 *     the value's variable), 2 = nullable (calli the runtime-resolved
 *     Nullable box helper with a hand-built signature), otherwise allocate
 *     and store the vtype payload after the object header;
 *   - default: handle_alloc () followed by a vtype store into the payload.
 * '*out_cbb' receives the current bblock on return.
 * NOTE(review): lossy extract — declarations of 'dreg'/'box_call', braces
 * and the final return are not visible; kept verbatim.
 */
4057 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4059 MonoInst *alloc, *ins;
4061 *out_cbb = cfg->cbb;
4063 if (mono_class_is_nullable (klass)) {
4064 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4067 /* FIXME: What if the class is shared? We might not
4068 have to get the method address from the RGCTX. */
4069 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4070 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4071 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4073 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4075 gboolean pass_vtable, pass_mrgctx;
4076 MonoInst *rgctx_arg = NULL;
4078 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4079 g_assert (!pass_mrgctx);
4082 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4085 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4088 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4092 if (mini_is_gsharedvt_klass (cfg, klass)) {
4093 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4094 MonoInst *res, *is_ref, *src_var, *addr;
4097 dreg = alloc_ireg (cfg);
4099 NEW_BBLOCK (cfg, is_ref_bb);
4100 NEW_BBLOCK (cfg, is_nullable_bb);
4101 NEW_BBLOCK (cfg, end_bb);
4102 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4103 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4104 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Value-type path: allocate and copy the payload after the header. */
4110 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4113 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4114 ins->opcode = OP_STOREV_MEMBASE;
4116 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4117 res->type = STACK_OBJ;
4119 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4122 MONO_START_BB (cfg, is_ref_bb);
4124 /* val is a vtype, so has to load the value manually */
4125 src_var = get_vreg_to_inst (cfg, val->dreg);
4127 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4128 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4130 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4133 MONO_START_BB (cfg, is_nullable_bb);
4136 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4137 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4139 MonoMethodSignature *box_sig;
4142 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4143 * construct that method at JIT time, so have to do things by hand.
4145 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4146 box_sig->ret = &mono_defaults.object_class->byval_arg;
4147 box_sig->param_count = 1;
4148 box_sig->params [0] = &klass->byval_arg;
4149 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4150 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4151 res->type = STACK_OBJ;
4155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4157 MONO_START_BB (cfg, end_bb);
4159 *out_cbb = cfg->cbb;
/* Non-gsharedvt default: plain allocate + payload store. */
4163 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4167 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, with a shared context,
 * an open generic type) that has at least one covariant/contravariant type
 * parameter instantiated with a reference type. Such casts need the
 * variance-aware cast helpers instead of the fast paths.
 * NOTE(review): lossy extract — declarations of 'i'/'type', the early
 * 'return FALSE' and the returns inside the loop are not visible; kept
 * verbatim.
 */
4173 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4176 MonoGenericContainer *container;
4177 MonoGenericInst *ginst;
4179 if (klass->generic_class) {
4180 container = klass->generic_class->container_class->generic_container;
4181 ginst = klass->generic_class->context.class_inst;
4182 } else if (klass->generic_container && context_used) {
4183 container = klass->generic_container;
4184 ginst = container->context.class_inst;
4189 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters are interesting. */
4191 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4193 type = ginst->type_argv [i];
4194 if (mini_type_is_reference (cfg, type))
/* Lazily-built whitelist of corlib class names whose icalls may be called
 * directly; written once behind a memory barrier, then read without locks. */
4200 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) can be invoked directly rather than
 * through a wrapper. Requires direct_icalls_enabled (); then accepts
 * System.Math plus a small whitelist of corlib classes (Decimal, Number,
 * Buffer) believed not to call mono_raise_exception () directly or
 * indirectly.
 * NOTE(review): lossy extract — the return statements and closing braces
 * are not visible; kept verbatim.
 */
4203 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4205 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4206 if (!direct_icalls_enabled (cfg))
4210 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4211 * Whitelist a few icalls for now.
4213 if (!direct_icall_type_hash) {
4214 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4216 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4217 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4218 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
/* Publish the fully-built table before making the pointer visible. */
4219 mono_memory_barrier ();
4220 direct_icall_type_hash = h;
4223 if (cmethod->klass == mono_defaults.math_class)
4225 /* No locking needed */
4226 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot use the simple inline
 * class check: interfaces, arrays, nullables, MarshalByRef classes, sealed
 * classes, and open generic parameters (VAR/MVAR) all need a slower path.
 * NOTE: function-like macro; evaluates 'klass' multiple times. */
4231 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper with ARGS
 * (object, class, cache slot), surrounded by save/reset of the detailed
 * cast-failure bookkeeping. '*out_bblock' receives the current bblock.
 * NOTE(review): lossy extract — the declaration of 'res' and the final
 * 'return res;' are not visible; kept verbatim.
 */
4234 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4236 MonoMethod *mono_castclass;
4239 mono_castclass = mono_marshal_get_castclass_with_cache ();
4241 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4242 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4243 reset_cast_details (cfg);
4244 *out_bblock = cfg->cbb;
/*
 * get_castclass_cache_idx:
 *
 *   Allocate a per-call-site castclass cache index: the method index in the
 * high 16 bits, a per-method counter in the low bits.
 */
4250 get_castclass_cache_idx (MonoCompile *cfg)
4252 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4253 cfg->castclass_cache_index ++;
4254 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-context variant of the castclass-with-cache call: pass KLASS
 * as a class constant and a cache slot that is either an AOT
 * CASTCLASS_CACHE patch (keyed by get_castclass_cache_idx ()) or a
 * domain-allocated pointer-sized slot at JIT time.
 * NOTE(review): lossy extract — the declarations of 'args'/'idx' and
 * args [0] setup are not visible; kept verbatim.
 */
4258 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4267 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4270 if (cfg->compile_aot) {
4271 idx = get_castclass_cache_idx (cfg);
4272 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4274 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4277 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4279 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4283 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode on SRC, throwing
 * InvalidCastException on failure. Paths, in order:
 *   - non-shared + reference-variant generic argument: cached cast wrapper
 *     (emit_castclass_with_cache_nonshared), costing 2 inline units;
 *   - non-shared + MarshalByRef or interface: inline the per-class
 *     castclass marshal wrapper (inline_method), adding its cost;
 *   - shared context + complex class: cached cast wrapper fed from
 *     rgctx-loaded cache and class;
 *   - otherwise: inline null check, then an interface cast, a direct
 *     vtable/class comparison for sealed non-array classes, or the generic
 *     mini_emit_castclass_inst () walk.
 * '*out_bb' receives the final bblock; '*inline_costs' is incremented.
 * NOTE(review): lossy extract — declarations of 'context_used'/'costs'/
 * 'args', several braces and returns are not visible; kept verbatim.
 */
4286 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4288 MonoBasicBlock *is_null_bb;
4289 int obj_reg = src->dreg;
4290 int vtable_reg = alloc_preg (cfg);
4292 MonoInst *klass_inst = NULL, *res;
4293 MonoBasicBlock *bblock;
4297 context_used = mini_class_check_context_used (cfg, klass);
4299 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4300 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4301 (*inline_costs) += 2;
4304 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4305 MonoMethod *mono_castclass;
4306 MonoInst *iargs [1];
4309 mono_castclass = mono_marshal_get_castclass (klass);
4312 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4313 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4314 iargs, ip, cfg->real_offset, TRUE, &bblock);
4315 reset_cast_details (cfg);
4316 CHECK_CFG_EXCEPTION;
4317 g_assert (costs > 0);
4319 cfg->real_offset += 5;
4321 (*inline_costs) += costs;
/* Shared-context path with a class needing the cached cast helper. */
4330 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4331 MonoInst *cache_ins;
4333 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4338 /* klass - it's the second element of the cache entry*/
4339 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4342 args [2] = cache_ins;
4344 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4347 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4350 NEW_BBLOCK (cfg, is_null_bb);
/* Null objects pass any castclass. */
4352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4355 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4357 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4359 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4361 int klass_reg = alloc_preg (cfg);
4363 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single equality compare suffices. */
4365 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4366 /* the remoting code is broken, access the class for now */
4367 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4368 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4370 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4371 cfg->exception_ptr = klass;
4374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4379 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4382 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4386 MONO_START_BB (cfg, is_null_bb);
4388 reset_cast_details (cfg);
4399 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' opcode on SRC: produce the object itself if it
 * is an instance of KLASS, NULL otherwise. For complex classes
 * (is_complex_isinst) or reference-variant generics, delegate to the cached
 * isinst marshal wrapper. Otherwise emit an inline check: result register
 * is pre-loaded with the object, a null object short-circuits to success
 * (isinst of null yields null), then the class kind is dispatched on:
 * interface cast, array rank/element-class checks (with special handling
 * for object[], Enum-based and interface element classes, plus a bounds
 * check distinguishing vectors from multi-dim arrays), nullable element
 * check, sealed-class vtable compare, or the generic subclass walk.
 * false_bb zeroes the result; is_null_bb keeps the copied object.
 * NOTE(review): lossy extract — declarations of 'ins'/'args'/'res'/'tmp_reg'
 * and many braces/returns are not visible; kept verbatim.
 */
4402 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4405 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4406 int obj_reg = src->dreg;
4407 int vtable_reg = alloc_preg (cfg);
4408 int res_reg = alloc_ireg_ref (cfg);
4409 MonoInst *klass_inst = NULL;
4414 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4415 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4416 MonoInst *cache_ins;
4418 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4423 /* klass - it's the second element of the cache entry*/
4424 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4427 args [2] = cache_ins;
4429 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4432 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4435 NEW_BBLOCK (cfg, is_null_bb);
4436 NEW_BBLOCK (cfg, false_bb);
4437 NEW_BBLOCK (cfg, end_bb);
4439 /* Do the assignment at the beginning, so the other assignment can be if converted */
4440 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4441 ins->type = STACK_OBJ;
/* isinst on NULL yields NULL (handled via the pre-copied result). */
4444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4445 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4449 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4450 g_assert (!context_used);
4451 /* the is_null_bb target simply copies the input register to the output */
4452 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4454 int klass_reg = alloc_preg (cfg);
/* Array class: first match the rank, then the element class. */
4457 int rank_reg = alloc_preg (cfg);
4458 int eclass_reg = alloc_preg (cfg);
4460 g_assert (!context_used);
4461 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4463 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4466 if (klass->cast_class == mono_defaults.object_class) {
4467 int parent_reg = alloc_preg (cfg);
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4469 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4470 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4472 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4473 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4474 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4476 } else if (klass->cast_class == mono_defaults.enum_class) {
4477 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4479 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4480 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4482 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4483 /* Check that the object is a vector too */
4484 int bounds_reg = alloc_preg (cfg);
4485 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4487 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4490 /* the is_null_bb target simply copies the input register to the output */
4491 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4493 } else if (mono_class_is_nullable (klass)) {
4494 g_assert (!context_used);
4495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4496 /* the is_null_bb target simply copies the input register to the output */
4497 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: identity comparison is sufficient. */
4499 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4500 g_assert (!context_used);
4501 /* the remoting code is broken, access the class for now */
4502 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4503 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4505 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4506 cfg->exception_ptr = klass;
4509 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4511 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4517 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4518 /* the is_null_bb target simply copies the input register to the output */
4519 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: zero the result; null/success path keeps the copy. */
4524 MONO_START_BB (cfg, false_bb);
4526 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4529 MONO_START_BB (cfg, is_null_bb);
4531 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CISINST (checked isinst) opcode: test whether SRC is
 * an instance of KLASS, producing an integer result (encoding below).
 * When remoting is enabled, transparent proxies get the extra blocks
 * false2_bb/no_proxy_bb so "cannot determine" (2) can be reported.
 * NOTE(review): this listing is elided; braces and #else/#endif lines
 * between the numbered lines are not shown.
 */
4537 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4539 /* This opcode takes as input an object reference and a class, and returns:
4540 0) if the object is an instance of the class,
4541 1) if the object is not an instance of the class,
4542 2) if the object is a proxy whose type cannot be determined */
4545 #ifndef DISABLE_REMOTING
4546 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4548 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4550 int obj_reg = src->dreg;
4551 int dreg = alloc_ireg (cfg);
4553 #ifndef DISABLE_REMOTING
4554 int klass_reg = alloc_preg (cfg);
4557 NEW_BBLOCK (cfg, true_bb);
4558 NEW_BBLOCK (cfg, false_bb);
4559 NEW_BBLOCK (cfg, end_bb);
4560 #ifndef DISABLE_REMOTING
4561 NEW_BBLOCK (cfg, false2_bb);
4562 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is not an instance of anything: branch to false_bb (result 1). */
4565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4568 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4569 #ifndef DISABLE_REMOTING
4570 NEW_BBLOCK (cfg, interface_fail_bb);
4573 tmp_reg = alloc_preg (cfg);
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4575 #ifndef DISABLE_REMOTING
/* Interface check failed: the object may still be a transparent proxy. */
4576 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4577 MONO_START_BB (cfg, interface_fail_bb);
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4580 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4582 tmp_reg = alloc_preg (cfg);
4583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* Proxy with custom type info: type cannot be determined (result 2). */
4585 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4587 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4590 #ifndef DISABLE_REMOTING
4591 tmp_reg = alloc_preg (cfg);
4592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4593 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4595 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4596 tmp_reg = alloc_preg (cfg);
/* For a proxy, test against the remote class's proxy_class instead of the vtable class. */
4597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4600 tmp_reg = alloc_preg (cfg);
4601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4605 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4608 MONO_START_BB (cfg, no_proxy_bb);
4610 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4612 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* false_bb: not an instance -> 1 */
4616 MONO_START_BB (cfg, false_bb);
4618 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4621 #ifndef DISABLE_REMOTING
/* false2_bb: proxy whose type cannot be determined -> 2 */
4622 MONO_START_BB (cfg, false2_bb);
4624 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: is an instance -> 0 */
4628 MONO_START_BB (cfg, true_bb);
4630 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4632 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value for the evaluation stack. */
4635 MONO_INST_NEW (cfg, ins, OP_ICONST);
4637 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CCASTCLASS (checked castclass) opcode; result
 * encoding is in the comment below.  Unlike handle_cisinst, a failed
 * check throws InvalidCastException instead of producing a value.
 */
4643 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4645 /* This opcode takes as input an object reference and a class, and returns:
4646 0) if the object is an instance of the class,
4647 1) if the object is a proxy whose type cannot be determined
4648 an InvalidCastException exception is thrown otherwise*/
4651 #ifndef DISABLE_REMOTING
4652 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4654 MonoBasicBlock *ok_result_bb;
4656 int obj_reg = src->dreg;
4657 int dreg = alloc_ireg (cfg);
4658 int tmp_reg = alloc_preg (cfg);
4660 #ifndef DISABLE_REMOTING
4661 int klass_reg = alloc_preg (cfg);
4662 NEW_BBLOCK (cfg, end_bb);
4665 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
4667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4668 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a descriptive message. */
4670 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4672 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4673 #ifndef DISABLE_REMOTING
4674 NEW_BBLOCK (cfg, interface_fail_bb);
4676 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4677 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4678 MONO_START_BB (cfg, interface_fail_bb);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may pass here; anything else throws. */
4681 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4683 tmp_reg = alloc_preg (cfg);
4684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4686 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: type cannot be determined (result 1). */
4688 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4692 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4696 #ifndef DISABLE_REMOTING
4697 NEW_BBLOCK (cfg, no_proxy_bb);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4701 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4703 tmp_reg = alloc_preg (cfg);
/* For a proxy, check against the remote class's proxy_class. */
4704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4707 tmp_reg = alloc_preg (cfg);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4712 NEW_BBLOCK (cfg, fail_1_bb);
4714 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy type could not be determined -> 1 */
4716 MONO_START_BB (cfg, fail_1_bb);
4718 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4719 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4721 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check, throwing on failure. */
4723 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4725 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4729 MONO_START_BB (cfg, ok_result_bb);
4731 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4733 #ifndef DISABLE_REMOTING
4734 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value for the evaluation stack. */
4738 MONO_INST_NEW (cfg, ins, OP_ICONST);
4740 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion for Enum.HasFlag (): load the enum value from
 * ENUM_THIS and emit ((value & flag) == flag) as a STACK_I4 result,
 * choosing 32-bit or 64-bit opcodes from the underlying enum type.
 */
4745 static G_GNUC_UNUSED MonoInst*
4746 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4748 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4749 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4752 switch (enum_type->type) {
4755 #if SIZEOF_REGISTER == 8
4767 MonoInst *load, *and, *cmp, *ceq;
4768 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4769 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4770 int dest_reg = alloc_ireg (cfg);
4772 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4773 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4774 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4775 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4777 ceq->type = STACK_I4;
/* NOTE(review): presumably lowers 64-bit ops on 32-bit targets — confirm
 * against mono_decompose_opcode (). */
4780 load = mono_decompose_opcode (cfg, load, NULL);
4781 and = mono_decompose_opcode (cfg, and, NULL);
4782 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4783 ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of a delegate constructor: allocate the delegate
 * object, store its target and method fields, arrange the per-domain
 * compiled-code slot, and install the invoke trampoline.
 */
4791 * Returns NULL and sets the cfg exception on error.
4793 static G_GNUC_UNUSED MonoInst*
4794 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4798 gpointer trampoline;
4799 MonoInst *obj, *method_ins, *tramp_ins;
4803 // FIXME reenable optimisation for virtual case
4808 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4811 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4815 obj = handle_alloc (cfg, klass, FALSE, 0);
4819 /* Inline the contents of mono_delegate_ctor */
4821 /* Set target field */
4822 /* Optimize away setting of NULL target */
4823 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds an object reference, so the GC may need a barrier. */
4825 if (cfg->gen_write_barriers) {
4826 dreg = alloc_preg (cfg);
4827 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4828 emit_write_barrier (cfg, ptr, target);
4832 /* Set method field */
4833 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4834 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4837 * To avoid looking up the compiled code belonging to the target method
4838 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4839 * store it, and we fill it after the method has been compiled.
4841 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4842 MonoInst *code_slot_ins;
4845 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and populate the per-domain method -> code-slot hash. */
4847 domain = mono_domain_get ();
4848 mono_domain_lock (domain);
4849 if (!domain_jit_info (domain)->method_code_hash)
4850 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4851 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4853 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4854 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4856 mono_domain_unlock (domain);
4858 if (cfg->compile_aot)
4859 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4861 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4863 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT cannot embed a runtime pointer, so emit a patchable constant instead. */
4866 if (cfg->compile_aot) {
4867 MonoDelegateClassMethodPair *del_tramp;
4869 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4870 del_tramp->klass = klass;
4871 del_tramp->method = context_used ? NULL : method;
4872 del_tramp->virtual = virtual;
4873 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4876 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4878 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4879 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4882 /* Set invoke_impl field */
4884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4886 dreg = alloc_preg (cfg);
4887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4890 dreg = alloc_preg (cfg);
4891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4895 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va () icall for a
 * multi-dimensional array NEWOBJ.  The icall is vararg, which disables
 * the LLVM backend for this method.
 */
4901 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4903 MonoJitICallInfo *info;
4905 /* Need to register the icall so it gets an icall wrapper */
4906 info = mono_get_array_new_va_icall (rank);
4908 cfg->flags |= MONO_CFG_HAS_VARARGS;
4910 /* mono_array_new_va () needs a vararg calling convention */
4911 cfg->disable_llvm = TRUE;
4913 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4914 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4918 * handle_constrained_gsharedvt_call:
4920 * Handle constrained calls where the receiver is a gsharedvt type.
4921 * Return the instruction representing the call. Set the cfg exception on failure.
4924 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4925 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4927 MonoInst *ins = NULL;
4928 MonoBasicBlock *bblock = *ref_bblock;
4929 gboolean emit_widen = *ref_emit_widen;
4932 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4933 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4934 * pack the arguments into an array, and do the rest of the work in an icall.
/* Guard: only simple shapes (few args, ref/primitive/gsharedvt types) are supported. */
4936 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4937 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4938 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4939 MonoInst *args [16];
4942 * This case handles calls to
4943 * - object:ToString()/Equals()/GetHashCode(),
4944 * - System.IComparable<T>:CompareTo()
4945 * - System.IEquatable<T>:Equals ()
4946 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1]: the target method, possibly looked up through the rgctx when shared. */
4950 if (mono_method_check_context_used (cmethod))
4951 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD)
4953 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4954 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4956 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4957 if (fsig->hasthis && fsig->param_count) {
4958 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4959 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4960 ins->dreg = alloc_preg (cfg);
4961 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4962 MONO_ADD_INS (cfg->cbb, ins);
4965 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
/* args [3]: box-type info for the gsharedvt argument; the icall boxes it as needed. */
4968 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4970 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4971 addr_reg = ins->dreg;
4972 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4974 EMIT_NEW_ICONST (cfg, args [3], 0);
4975 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4978 EMIT_NEW_ICONST (cfg, args [3], 0);
4979 EMIT_NEW_ICONST (cfg, args [4], 0);
4981 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/unwrap it to match fsig->ret. */
4984 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4985 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4986 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
4990 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4991 MONO_ADD_INS (cfg->cbb, add);
4993 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4994 MONO_ADD_INS (cfg->cbb, ins);
4995 /* ins represents the call result */
4998 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5001 *ref_emit_widen = emit_widen;
5002 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes the GOT
 * variable at method entry, plus a dummy use in the exit block so the
 * variable stays live for the whole method.  No-op if the cfg has no
 * GOT var or it was already allocated.
 */
5011 mono_emit_load_got_addr (MonoCompile *cfg)
5013 MonoInst *getaddr, *dummy_use;
5015 if (!cfg->got_var || cfg->got_var_allocated)
5018 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5019 getaddr->cil_code = cfg->header->code;
5020 getaddr->dreg = cfg->got_var->dreg;
5022 /* Add it to the start of the first bblock */
5023 if (cfg->bb_entry->code) {
5024 getaddr->next = cfg->bb_entry->code;
5025 cfg->bb_entry->code = getaddr;
5028 MONO_ADD_INS (cfg->bb_entry, getaddr);
5030 cfg->got_var_allocated = TRUE;
5033 * Add a dummy use to keep the got_var alive, since real uses might
5034 * only be generated by the back ends.
5035 * Add it to end_bblock, so the variable's lifetime covers the whole
5037 * It would be better to make the usage of the got var explicit in all
5038 * cases when the backend needs it (i.e. calls, throw etc.), so this
5039 * wouldn't be needed.
5041 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5042 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, read once from MONO_INLINELIMIT (default INLINE_LENGTH_LIMIT). */
5045 static int inline_limit;
5046 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled
 * in CFG.  Checks compile flags, method attributes, IL size against the
 * inline limit, class-initialization constraints, declarative security
 * and soft-float restrictions.
 */
5049 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5051 MonoMethodHeaderSummary header;
5053 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5054 MonoMethodSignature *sig = mono_method_signature (method);
5058 if (cfg->disable_inline)
5060 if (cfg->generic_sharing_context)
5063 if (cfg->inline_depth > 10)
5066 #ifdef MONO_ARCH_HAVE_LMF_OPS
5067 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5068 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5069 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5074 if (!mono_method_get_header_summary (method, &header))
5077 /*runtime, icall and pinvoke are checked by summary call*/
5078 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5079 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5080 (mono_class_is_marshalbyref (method->klass)) ||
5084 /* also consider num_locals? */
5085 /* Do the size check early to avoid creating vtables */
5086 if (!inline_limit_inited) {
5087 if (g_getenv ("MONO_INLINELIMIT"))
5088 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5090 inline_limit = INLINE_LENGTH_LIMIT;
5091 inline_limit_inited = TRUE;
/* [AggressiveInlining] overrides the size limit. */
5093 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5097 * if we can initialize the class of the method right away, we do,
5098 * otherwise we don't allow inlining if the class needs initialization,
5099 * since it would mean inserting a call to mono_runtime_class_init()
5100 * inside the inlined code
5102 if (!(cfg->opt & MONO_OPT_SHARED)) {
5103 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5104 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5105 vtable = mono_class_vtable (cfg->domain, method->klass);
5108 if (!cfg->compile_aot)
5109 mono_runtime_class_init (vtable);
5110 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5111 if (cfg->run_cctors && method->klass->has_cctor) {
5112 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5113 if (!method->klass->runtime_info)
5114 /* No vtable created yet */
5116 vtable = mono_class_vtable (cfg->domain, method->klass);
5119 /* This makes so that inline cannot trigger */
5120 /* .cctors: too many apps depend on them */
5121 /* running with a specific order... */
5122 if (! vtable->initialized)
5124 mono_runtime_class_init (vtable);
5126 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5127 if (!method->klass->runtime_info)
5128 /* No vtable created yet */
5130 vtable = mono_class_vtable (cfg->domain, method->klass);
5133 if (!vtable->initialized)
5138 * If we're compiling for shared code
5139 * the cctor will need to be run at aot method load time, for example,
5140 * or at the end of the compilation of the inlining method.
5142 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5147 * CAS - do not inline methods with declarative security
5148 * Note: this has to be before any possible return TRUE;
5150 if (mono_security_method_has_declsec (method))
/* Soft float: R4 arguments/returns cannot be inlined safely. */
5153 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5154 if (mono_arch_is_soft_float ()) {
5156 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5158 for (i = 0; i < sig->param_count; ++i)
5159 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5164 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD to a field of KLASS
 * requires emitting a class-initialization check.
 */
5171 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5173 if (!cfg->compile_aot) {
5175 if (vtable->initialized)
5179 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5180 if (cfg->method == method)
5184 if (!mono_class_needs_cctor_run (klass, method))
5187 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5188 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of ARR [INDEX] for a single-dimensional array of
 * KLASS elements, with an optional bounds check (BCHECK).  Returns the
 * STACK_MP instruction holding the element address.
 */
5195 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5199 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5202 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5205 mono_class_init (klass);
5206 size = mono_class_array_element_size (klass);
5209 mult_reg = alloc_preg (cfg);
5210 array_reg = arr->dreg;
5211 index_reg = index->dreg;
5213 #if SIZEOF_REGISTER == 8
5214 /* The array reg is 64 bits but the index reg is only 32 */
5215 if (COMPILE_LLVM (cfg)) {
5217 index2_reg = index_reg;
5219 index2_reg = alloc_preg (cfg);
5220 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5223 if (index->type == STACK_I8) {
5224 index2_reg = alloc_preg (cfg);
5225 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5227 index2_reg = index_reg;
5232 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold scaling and offset into one LEA on x86/amd64. */
5234 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5235 if (size == 1 || size == 2 || size == 4 || size == 8) {
5236 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5238 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5239 ins->klass = mono_class_get_element_class (klass);
5240 ins->type = STACK_MP;
5246 add_reg = alloc_ireg_mp (cfg);
5249 MonoInst *rgctx_ins;
/* Gsharedvt: element size is only known at runtime, fetch it from the rgctx. */
5252 g_assert (cfg->generic_sharing_context);
5253 context_used = mini_class_check_context_used (cfg, klass);
5254 g_assert (context_used);
5255 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5256 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5260 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5261 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5262 ins->klass = mono_class_get_element_class (klass);
5263 ins->type = STACK_MP;
5264 MONO_ADD_INS (cfg->cbb, ins);
5269 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of ARR [i, j] for a rank-2 array: range-check both
 * indexes against the per-dimension bounds, then compute
 * ((i - lo1) * len2 + (j - lo2)) * element_size into the vector.
 */
5271 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5273 int bounds_reg = alloc_preg (cfg);
5274 int add_reg = alloc_ireg_mp (cfg);
5275 int mult_reg = alloc_preg (cfg);
5276 int mult2_reg = alloc_preg (cfg);
5277 int low1_reg = alloc_preg (cfg);
5278 int low2_reg = alloc_preg (cfg);
5279 int high1_reg = alloc_preg (cfg);
5280 int high2_reg = alloc_preg (cfg);
5281 int realidx1_reg = alloc_preg (cfg);
5282 int realidx2_reg = alloc_preg (cfg);
5283 int sum_reg = alloc_preg (cfg);
5284 int index1, index2, tmpreg;
5288 mono_class_init (klass);
5289 size = mono_class_array_element_size (klass);
5291 index1 = index_ins1->dreg;
5292 index2 = index_ins2->dreg;
5294 #if SIZEOF_REGISTER == 8
5295 /* The array reg is 64 bits but the index reg is only 32 */
5296 if (COMPILE_LLVM (cfg)) {
5299 tmpreg = alloc_preg (cfg);
5300 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5302 tmpreg = alloc_preg (cfg);
5303 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5307 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5311 /* range checking */
5312 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5313 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: subtract the lower bound, unsigned-compare against the length. */
5315 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5316 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5317 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5318 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5319 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5320 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5321 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the bounds array entry lives sizeof (MonoArrayBounds) further on. */
5323 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5324 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5325 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5326 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5327 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5328 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5329 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5331 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5332 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5334 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5335 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5337 ins->type = STACK_MP;
5339 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element address for an array Address/Get/Set accessor:
 * rank 1 goes through the fast 1-D path, rank 2 may use the intrinsic
 * 2-D path, everything else calls the marshalled address helper.
 */
5346 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5350 MonoMethod *addr_method;
5352 MonoClass *eclass = cmethod->klass->element_class;
/* A setter's last parameter is the value, not an index. */
5354 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5357 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5359 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5360 /* emit_ldelema_2 depends on OP_LMUL */
5361 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5362 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5366 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
5369 element_size = mono_class_array_element_size (eclass);
5370 addr_method = mono_marshal_get_array_address (rank, element_size);
5371 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint. */
5376 static MonoBreakPolicy
5377 always_insert_breakpoint (MonoMethod *method)
5379 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5382 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5385 * mono_set_break_policy:
5386 * policy_callback: the new callback function
5388 * Allow embedders to decide whether to actually obey breakpoint instructions
5389 * (both break IL instructions and Debugger.Break () method calls), for example
5390 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5391 * untrusted or semi-trusted code.
5393 * @policy_callback will be called every time a break point instruction needs to
5394 * be inserted with the method argument being the method that calls Debugger.Break()
5395 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5396 * if it wants the breakpoint to not be effective in the given method.
5397 * #MONO_BREAK_POLICY_ALWAYS is the default.
5400 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5402 if (policy_callback)
5403 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
5405 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should
 * be honored.  (The misspelled name is the established identifier;
 * renaming it would break existing callers.)
 */
5409 should_insert_brekpoint (MonoMethod *method) {
5410 switch (break_policy_func (method)) {
5411 case MONO_BREAK_POLICY_ALWAYS:
5413 case MONO_BREAK_POLICY_NEVER:
5415 case MONO_BREAK_POLICY_ON_DBG:
5416 g_warning ("mdb no longer supported");
5419 g_warning ("Incorrect value returned from break policy callback");
5424 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5426 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5428 MonoInst *addr, *store, *load;
5429 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5431 /* the bounds check is already done by the callers */
5432 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: copy the value into the array slot (with a GC barrier for ref elements). */
5434 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5435 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5436 if (mini_type_is_reference (cfg, fsig->params [2]))
5437 emit_write_barrier (cfg, addr, load);
/* get: copy the array slot out into the caller-supplied location. */
5439 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5440 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type under the current generic-sharing view. */
5447 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5449 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into SP [0] [SP [1]].  Reference-type
 * stores of a non-null value go through the virtual stelemref helper
 * (which performs the array covariance check); value types are stored
 * directly, with an optional bounds check when SAFETY_CHECKS is set.
 */
5453 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5455 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5456 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5457 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5458 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5459 MonoInst *iargs [3];
5462 mono_class_setup_vtable (obj_array);
5463 g_assert (helper->slot);
5465 if (sp [0]->type != STACK_OBJ)
5467 if (sp [2]->type != STACK_OBJ)
5474 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Gsharedvt: the element size is runtime-dependent, use an address + STOREV. */
5478 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5481 // FIXME-VT: OP_ICONST optimization
5482 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5483 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5484 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
5485 } else if (sp [1]->opcode == OP_ICONST) {
5486 int array_reg = sp [0]->dreg;
5487 int index_reg = sp [1]->dreg;
5488 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5491 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5492 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5494 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5495 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5496 if (generic_class_is_reference_type (cfg, klass))
5497 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Intrinsic for the Array UnsafeLoad/UnsafeStore helpers: access the
 * element without a bounds check (the callers guarantee safety).
 */
5504 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5509 eklass = mono_class_from_mono_type (fsig->params [2]);
5511 eklass = mono_class_from_mono_type (fsig->ret);
5514 return emit_array_store (cfg, eklass, args, FALSE);
5516 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5517 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether Array.UnsafeMov may reinterpret a value of
 * PARAM_KLASS as RETURN_KLASS: both must be valuetypes, contain no
 * GC references, not mix struct with primitive/enum, not involve
 * floating point, and have the same value size.
 * FIX: "&param_klass" had been corrupted to "¶m_klass" by an HTML
 * entity round-trip ("&para;" rendered as U+00B6); the address-of
 * operator is restored so the MONO_TYPE_ISSTRUCT checks compile.
 */
5523 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5527 //Only allow for valuetypes
5528 if (!param_klass->valuetype || !return_klass->valuetype)
5532 if (param_klass->has_references || return_klass->has_references)
5535 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5536 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5537 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5540 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5541 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5544 //And have the same size
5545 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Intrinsic for Array.UnsafeMov: reinterpret the argument as the
 * return type when the two classes (or their element classes, for
 * rank-1 arrays) are layout-compatible.
 */
5551 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5553 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5554 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5556 //Valuetypes that are semantically equivalent
5557 if (is_unsafe_mov_compatible (param_klass, return_klass))
5560 //Arrays of valuetypes that are semantically equivalent
5561 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Give the intrinsic emitters a chance to replace a constructor call
 * with an inline IR sequence: first the SIMD intrinsics (only when
 * MONO_OPT_SIMD is enabled and the architecture supports them), then the
 * native-types intrinsics.  NOTE(review): the #endif and the handling of
 * a non-NULL SIMD result are among the lines omitted from this extract.
 */
5568 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5570 #ifdef MONO_ARCH_SIMD_INTRINSICS
5571 MonoInst *ins = NULL;
5573 if (cfg->opt & MONO_OPT_SIMD) {
5574 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5580 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given kind
 * (MONO_MEMORY_BARRIER_ACQ / _REL / _SEQ) to the current basic block.
 * NOTE(review): the surrounding braces and the 'return ins;' are among
 * the lines omitted from this extract.
 */
5584 emit_memory_barrier (MonoCompile *cfg, int kind)
5586 MonoInst *ins = NULL;
5587 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5588 MONO_ADD_INS (cfg->cbb, ins);
5589 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics that are only emitted when compiling with the LLVM
 * backend: System.Math Sin/Cos/Sqrt/Abs(double) as single FP opcodes, and
 * (when MONO_OPT_CMOV is enabled) branchless integer Min/Max.
 * NOTE(review): the opcode assignments for several branches (Sin/Cos/
 * Sqrt/Abs, I4/I8 Min/Max) and the final return are among the lines
 * omitted from this extract.
 */
5595 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5597 MonoInst *ins = NULL;
5600 /* The LLVM backend supports these intrinsics */
5601 if (cmethod->klass == mono_defaults.math_class) {
5602 if (strcmp (cmethod->name, "Sin") == 0) {
5604 } else if (strcmp (cmethod->name, "Cos") == 0) {
5606 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5608 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary FP intrinsic: one argument in, R8 result out */
5612 if (opcode && fsig->param_count == 1) {
5613 MONO_INST_NEW (cfg, ins, opcode);
5614 ins->type = STACK_R8;
5615 ins->dreg = mono_alloc_freg (cfg);
5616 ins->sreg1 = args [0]->dreg;
5617 MONO_ADD_INS (cfg->cbb, ins);
/* branchless Min/Max requires conditional-move support */
5621 if (cfg->opt & MONO_OPT_CMOV) {
5622 if (strcmp (cmethod->name, "Min") == 0) {
5623 if (fsig->params [0]->type == MONO_TYPE_I4)
5625 if (fsig->params [0]->type == MONO_TYPE_U4)
5626 opcode = OP_IMIN_UN;
5627 else if (fsig->params [0]->type == MONO_TYPE_I8)
5629 else if (fsig->params [0]->type == MONO_TYPE_U8)
5630 opcode = OP_LMIN_UN;
5631 } else if (strcmp (cmethod->name, "Max") == 0) {
5632 if (fsig->params [0]->type == MONO_TYPE_I4)
5634 if (fsig->params [0]->type == MONO_TYPE_U4)
5635 opcode = OP_IMAX_UN;
5636 else if (fsig->params [0]->type == MONO_TYPE_I8)
5638 else if (fsig->params [0]->type == MONO_TYPE_U8)
5639 opcode = OP_LMAX_UN;
/* binary integer intrinsic: two arguments, I4 or I8 result */
5643 if (opcode && fsig->param_count == 2) {
5644 MONO_INST_NEW (cfg, ins, opcode);
5645 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5646 ins->dreg = mono_alloc_ireg (cfg);
5647 ins->sreg1 = args [0]->dreg;
5648 ins->sreg2 = args [1]->dreg;
5649 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use in shared (generic-sharing) code:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov icalls.  Dispatches on the
 * method name and delegates to the corresponding emit helper.
 * NOTE(review): the fallthrough return for unmatched methods is among
 * the lines omitted from this extract.
 */
5657 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5659 if (cmethod->klass == mono_defaults.array_class) {
5660 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5661 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5662 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5663 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5664 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5665 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Central intrinsic dispatcher of the IR builder.  If CMETHOD is one of
 * a known set of corlib (or MonoMac/monotouch Selector) methods, emit an
 * inline IR sequence in place of the call and return the resulting
 * instruction; otherwise fall through to the SIMD, native-types, LLVM and
 * architecture-specific emitters.  A NULL result means the call must be
 * performed normally.
 * NOTE(review): this extract omits many interior lines (closing braces,
 * #else/#endif pairs, case labels, early returns and some assignments);
 * the comments below only describe what is visible here.
 */
5672 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5674 MonoInst *ins = NULL;
/* lazily cached System.Runtime.CompilerServices.RuntimeHelpers class */
5676 static MonoClass *runtime_helpers_class = NULL;
5677 if (! runtime_helpers_class)
5678 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5679 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5681 if (cmethod->klass == mono_defaults.string_class) {
5682 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5683 int dreg = alloc_ireg (cfg);
5684 int index_reg = alloc_preg (cfg);
5685 int add_reg = alloc_preg (cfg);
5687 #if SIZEOF_REGISTER == 8
5688 /* The array reg is 64 bits but the index reg is only 32 */
5689 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5691 index_reg = args [1]->dreg;
5693 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5695 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5696 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5697 add_reg = ins->dreg;
5698 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* non-x86 path: compute chars + index*2 manually */
5701 int mult_reg = alloc_preg (cfg);
5702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5703 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5704 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5705 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5707 type_from_op (cfg, ins, NULL, NULL);
5709 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5710 int dreg = alloc_ireg (cfg);
5711 /* Decompose later to allow more optimizations */
5712 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5713 ins->type = STACK_I4;
5714 ins->flags |= MONO_INST_FAULT;
5715 cfg->cbb->has_array_access = TRUE;
5716 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5719 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5720 int mult_reg = alloc_preg (cfg);
5721 int add_reg = alloc_preg (cfg);
5723 /* The corlib functions check for oob already. */
5724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5725 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5726 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5727 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5730 } else if (cmethod->klass == mono_defaults.object_class) {
5732 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5733 int dreg = alloc_ireg_ref (cfg);
5734 int vt_reg = alloc_preg (cfg);
5735 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5736 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5737 type_from_op (cfg, ins, NULL, NULL);
5740 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* hash = (address << 3) * 2654435761u — only valid with a non-moving GC */
5741 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5742 int dreg = alloc_ireg (cfg);
5743 int t1 = alloc_ireg (cfg);
5745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5746 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5747 ins->type = STACK_I4;
5751 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5752 MONO_INST_NEW (cfg, ins, OP_NOP);
5753 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5757 } else if (cmethod->klass == mono_defaults.array_class) {
5758 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5759 return emit_array_generic_access (cfg, fsig, args, FALSE);
5760 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5761 return emit_array_generic_access (cfg, fsig, args, TRUE);
5763 #ifndef MONO_BIG_ARRAYS
5765 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5768 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5769 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5770 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5771 int dreg = alloc_ireg (cfg);
5772 int bounds_reg = alloc_ireg_mp (cfg);
5773 MonoBasicBlock *end_bb, *szarray_bb;
5774 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5776 NEW_BBLOCK (cfg, end_bb);
5777 NEW_BBLOCK (cfg, szarray_bb);
/* a NULL bounds pointer marks a szarray (single-dim, zero-based) */
5779 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5780 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5781 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5782 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5783 /* Non-szarray case */
5785 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5786 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5788 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5789 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5791 MONO_START_BB (cfg, szarray_bb);
5794 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5795 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5797 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5798 MONO_START_BB (cfg, end_bb);
5800 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5801 ins->type = STACK_I4;
/* quick reject: remaining Array intrinsics all start with 'g' */
5807 if (cmethod->name [0] != 'g')
5810 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5811 int dreg = alloc_ireg (cfg);
5812 int vtable_reg = alloc_preg (cfg);
5813 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5814 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5815 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5816 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5817 type_from_op (cfg, ins, NULL, NULL);
5820 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5821 int dreg = alloc_ireg (cfg);
5823 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5824 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5825 type_from_op (cfg, ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5830 } else if (cmethod->klass == runtime_helpers_class) {
5832 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5833 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5837 } else if (cmethod->klass == mono_defaults.thread_class) {
5838 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5839 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5840 MONO_ADD_INS (cfg->cbb, ins);
5842 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5843 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5844 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5846 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5848 if (fsig->params [0]->type == MONO_TYPE_I1)
5849 opcode = OP_LOADI1_MEMBASE;
5850 else if (fsig->params [0]->type == MONO_TYPE_U1)
5851 opcode = OP_LOADU1_MEMBASE;
5852 else if (fsig->params [0]->type == MONO_TYPE_I2)
5853 opcode = OP_LOADI2_MEMBASE;
5854 else if (fsig->params [0]->type == MONO_TYPE_U2)
5855 opcode = OP_LOADU2_MEMBASE;
5856 else if (fsig->params [0]->type == MONO_TYPE_I4)
5857 opcode = OP_LOADI4_MEMBASE;
5858 else if (fsig->params [0]->type == MONO_TYPE_U4)
5859 opcode = OP_LOADU4_MEMBASE;
5860 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5861 opcode = OP_LOADI8_MEMBASE;
5862 else if (fsig->params [0]->type == MONO_TYPE_R4)
5863 opcode = OP_LOADR4_MEMBASE;
5864 else if (fsig->params [0]->type == MONO_TYPE_R8)
5865 opcode = OP_LOADR8_MEMBASE;
5866 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5867 opcode = OP_LOAD_MEMBASE;
5870 MONO_INST_NEW (cfg, ins, opcode);
5871 ins->inst_basereg = args [0]->dreg;
5872 ins->inst_offset = 0;
5873 MONO_ADD_INS (cfg->cbb, ins);
/* pick dreg kind and stack type from the parameter type */
5875 switch (fsig->params [0]->type) {
5882 ins->dreg = mono_alloc_ireg (cfg);
5883 ins->type = STACK_I4;
5887 ins->dreg = mono_alloc_lreg (cfg);
5888 ins->type = STACK_I8;
5892 ins->dreg = mono_alloc_ireg (cfg);
5893 #if SIZEOF_REGISTER == 8
5894 ins->type = STACK_I8;
5896 ins->type = STACK_I4;
5901 ins->dreg = mono_alloc_freg (cfg);
5902 ins->type = STACK_R8;
5905 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5906 ins->dreg = mono_alloc_ireg_ref (cfg);
5907 ins->type = STACK_OBJ;
5911 if (opcode == OP_LOADI8_MEMBASE)
5912 ins = mono_decompose_opcode (cfg, ins, NULL);
/* acquire barrier after the volatile load */
5914 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
5918 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5920 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5922 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5923 opcode = OP_STOREI1_MEMBASE_REG;
5924 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5925 opcode = OP_STOREI2_MEMBASE_REG;
5926 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5927 opcode = OP_STOREI4_MEMBASE_REG;
5928 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5929 opcode = OP_STOREI8_MEMBASE_REG;
5930 else if (fsig->params [0]->type == MONO_TYPE_R4)
5931 opcode = OP_STORER4_MEMBASE_REG;
5932 else if (fsig->params [0]->type == MONO_TYPE_R8)
5933 opcode = OP_STORER8_MEMBASE_REG;
5934 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5935 opcode = OP_STORE_MEMBASE_REG;
/* release barrier before the volatile store */
5938 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5940 MONO_INST_NEW (cfg, ins, opcode);
5941 ins->sreg1 = args [1]->dreg;
5942 ins->inst_destbasereg = args [0]->dreg;
5943 ins->inst_offset = 0;
5944 MONO_ADD_INS (cfg->cbb, ins);
5946 if (opcode == OP_STOREI8_MEMBASE_REG)
5947 ins = mono_decompose_opcode (cfg, ins, NULL);
/* ---- System.Threading.Monitor fastpaths (trampoline-based) ---- */
5952 } else if (cmethod->klass == mono_defaults.monitor_class) {
5953 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5954 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5957 if (COMPILE_LLVM (cfg)) {
5959 * Pass the argument normally, the LLVM backend will handle the
5960 * calling convention problems.
5962 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5964 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5965 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5966 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5967 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5970 return (MonoInst*)call;
5971 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter (object, ref bool lockTaken) — the v4 variant */
5972 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5975 if (COMPILE_LLVM (cfg)) {
5977 * Pass the argument normally, the LLVM backend will handle the
5978 * calling convention problems.
5980 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5982 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5983 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5984 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5985 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5988 return (MonoInst*)call;
5990 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5993 if (COMPILE_LLVM (cfg)) {
5994 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5996 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5997 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5998 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5999 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6002 return (MonoInst*)call;
/* ---- System.Threading.Interlocked ---- */
6005 } else if (cmethod->klass->image == mono_defaults.corlib &&
6006 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6007 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6010 #if SIZEOF_REGISTER == 8
6011 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6012 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6013 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6014 ins->dreg = mono_alloc_preg (cfg);
6015 ins->sreg1 = args [0]->dreg;
6016 ins->type = STACK_I8;
6017 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6018 MONO_ADD_INS (cfg->cbb, ins);
6022 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6024 /* 64 bit reads are already atomic */
6025 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6026 load_ins->dreg = mono_alloc_preg (cfg);
6027 load_ins->inst_basereg = args [0]->dreg;
6028 load_ins->inst_offset = 0;
6029 load_ins->type = STACK_I8;
6030 MONO_ADD_INS (cfg->cbb, load_ins);
6032 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement are lowered to atomic add of +1/-1 */
6039 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6040 MonoInst *ins_iconst;
6043 if (fsig->params [0]->type == MONO_TYPE_I4) {
6044 opcode = OP_ATOMIC_ADD_I4;
6045 cfg->has_atomic_add_i4 = TRUE;
6047 #if SIZEOF_REGISTER == 8
6048 else if (fsig->params [0]->type == MONO_TYPE_I8)
6049 opcode = OP_ATOMIC_ADD_I8;
6052 if (!mono_arch_opcode_supported (opcode))
6054 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6055 ins_iconst->inst_c0 = 1;
6056 ins_iconst->dreg = mono_alloc_ireg (cfg);
6057 MONO_ADD_INS (cfg->cbb, ins_iconst);
6059 MONO_INST_NEW (cfg, ins, opcode);
6060 ins->dreg = mono_alloc_ireg (cfg);
6061 ins->inst_basereg = args [0]->dreg;
6062 ins->inst_offset = 0;
6063 ins->sreg2 = ins_iconst->dreg;
6064 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6065 MONO_ADD_INS (cfg->cbb, ins);
6067 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6068 MonoInst *ins_iconst;
6071 if (fsig->params [0]->type == MONO_TYPE_I4) {
6072 opcode = OP_ATOMIC_ADD_I4;
6073 cfg->has_atomic_add_i4 = TRUE;
6075 #if SIZEOF_REGISTER == 8
6076 else if (fsig->params [0]->type == MONO_TYPE_I8)
6077 opcode = OP_ATOMIC_ADD_I8;
6080 if (!mono_arch_opcode_supported (opcode))
6082 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6083 ins_iconst->inst_c0 = -1;
6084 ins_iconst->dreg = mono_alloc_ireg (cfg);
6085 MONO_ADD_INS (cfg->cbb, ins_iconst);
6087 MONO_INST_NEW (cfg, ins, opcode);
6088 ins->dreg = mono_alloc_ireg (cfg);
6089 ins->inst_basereg = args [0]->dreg;
6090 ins->inst_offset = 0;
6091 ins->sreg2 = ins_iconst->dreg;
6092 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6093 MONO_ADD_INS (cfg->cbb, ins);
6095 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6098 if (fsig->params [0]->type == MONO_TYPE_I4) {
6099 opcode = OP_ATOMIC_ADD_I4;
6100 cfg->has_atomic_add_i4 = TRUE;
6102 #if SIZEOF_REGISTER == 8
6103 else if (fsig->params [0]->type == MONO_TYPE_I8)
6104 opcode = OP_ATOMIC_ADD_I8;
6107 if (!mono_arch_opcode_supported (opcode))
6109 MONO_INST_NEW (cfg, ins, opcode);
6110 ins->dreg = mono_alloc_ireg (cfg);
6111 ins->inst_basereg = args [0]->dreg;
6112 ins->inst_offset = 0;
6113 ins->sreg2 = args [1]->dreg;
6114 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6115 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: floats are shuttled through integer registers (f2i/i2f) */
6118 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6119 MonoInst *f2i = NULL, *i2f;
6120 guint32 opcode, f2i_opcode, i2f_opcode;
6121 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6122 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6124 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6125 fsig->params [0]->type == MONO_TYPE_R4) {
6126 opcode = OP_ATOMIC_EXCHANGE_I4;
6127 f2i_opcode = OP_MOVE_F_TO_I4;
6128 i2f_opcode = OP_MOVE_I4_TO_F;
6129 cfg->has_atomic_exchange_i4 = TRUE;
6131 #if SIZEOF_REGISTER == 8
6133 fsig->params [0]->type == MONO_TYPE_I8 ||
6134 fsig->params [0]->type == MONO_TYPE_R8 ||
6135 fsig->params [0]->type == MONO_TYPE_I) {
6136 opcode = OP_ATOMIC_EXCHANGE_I8;
6137 f2i_opcode = OP_MOVE_F_TO_I8;
6138 i2f_opcode = OP_MOVE_I8_TO_F;
6141 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6142 opcode = OP_ATOMIC_EXCHANGE_I4;
6143 cfg->has_atomic_exchange_i4 = TRUE;
6149 if (!mono_arch_opcode_supported (opcode))
6153 /* TODO: Decompose these opcodes instead of bailing here. */
6154 if (COMPILE_SOFT_FLOAT (cfg))
6157 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6158 f2i->dreg = mono_alloc_ireg (cfg);
6159 f2i->sreg1 = args [1]->dreg;
6160 if (f2i_opcode == OP_MOVE_F_TO_I4)
6161 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6162 MONO_ADD_INS (cfg->cbb, f2i);
6165 MONO_INST_NEW (cfg, ins, opcode);
6166 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6167 ins->inst_basereg = args [0]->dreg;
6168 ins->inst_offset = 0;
6169 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6170 MONO_ADD_INS (cfg->cbb, ins);
6172 switch (fsig->params [0]->type) {
6174 ins->type = STACK_I4;
6177 ins->type = STACK_I8;
6180 #if SIZEOF_REGISTER == 8
6181 ins->type = STACK_I8;
6183 ins->type = STACK_I4;
6188 ins->type = STACK_R8;
6191 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6192 ins->type = STACK_OBJ;
6197 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6198 i2f->dreg = mono_alloc_freg (cfg);
6199 i2f->sreg1 = ins->dreg;
6200 i2f->type = STACK_R8;
6201 if (i2f_opcode == OP_MOVE_I4_TO_F)
6202 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6203 MONO_ADD_INS (cfg->cbb, i2f);
/* a reference was written into *args[0]: notify the GC */
6208 if (cfg->gen_write_barriers && is_ref)
6209 emit_write_barrier (cfg, args [0], args [1]);
6211 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6212 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6213 guint32 opcode, f2i_opcode, i2f_opcode;
6214 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6215 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6217 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6218 fsig->params [1]->type == MONO_TYPE_R4) {
6219 opcode = OP_ATOMIC_CAS_I4;
6220 f2i_opcode = OP_MOVE_F_TO_I4;
6221 i2f_opcode = OP_MOVE_I4_TO_F;
6222 cfg->has_atomic_cas_i4 = TRUE;
6224 #if SIZEOF_REGISTER == 8
6226 fsig->params [1]->type == MONO_TYPE_I8 ||
6227 fsig->params [1]->type == MONO_TYPE_R8 ||
6228 fsig->params [1]->type == MONO_TYPE_I) {
6229 opcode = OP_ATOMIC_CAS_I8;
6230 f2i_opcode = OP_MOVE_F_TO_I8;
6231 i2f_opcode = OP_MOVE_I8_TO_F;
6234 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6235 opcode = OP_ATOMIC_CAS_I4;
6236 cfg->has_atomic_cas_i4 = TRUE;
6242 if (!mono_arch_opcode_supported (opcode))
6246 /* TODO: Decompose these opcodes instead of bailing here. */
6247 if (COMPILE_SOFT_FLOAT (cfg))
6250 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6251 f2i_new->dreg = mono_alloc_ireg (cfg);
6252 f2i_new->sreg1 = args [1]->dreg;
6253 if (f2i_opcode == OP_MOVE_F_TO_I4)
6254 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6255 MONO_ADD_INS (cfg->cbb, f2i_new);
6257 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6258 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6259 f2i_cmp->sreg1 = args [2]->dreg;
6260 if (f2i_opcode == OP_MOVE_F_TO_I4)
6261 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6262 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6265 MONO_INST_NEW (cfg, ins, opcode);
6266 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6267 ins->sreg1 = args [0]->dreg;
6268 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6269 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6270 MONO_ADD_INS (cfg->cbb, ins);
6272 switch (fsig->params [1]->type) {
6274 ins->type = STACK_I4;
6277 ins->type = STACK_I8;
6280 #if SIZEOF_REGISTER == 8
6281 ins->type = STACK_I8;
6283 ins->type = STACK_I4;
6288 ins->type = STACK_R8;
6291 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6292 ins->type = STACK_OBJ;
6297 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6298 i2f->dreg = mono_alloc_freg (cfg);
6299 i2f->sreg1 = ins->dreg;
6300 i2f->type = STACK_R8;
6301 if (i2f_opcode == OP_MOVE_I4_TO_F)
6302 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6303 MONO_ADD_INS (cfg->cbb, i2f);
6308 if (cfg->gen_write_barriers && is_ref)
6309 emit_write_barrier (cfg, args [0], args [1]);
/* 4-arg CompareExchange: also reports success through args[3] */
6311 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6312 fsig->params [1]->type == MONO_TYPE_I4) {
6313 MonoInst *cmp, *ceq;
6315 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6318 /* int32 r = CAS (location, value, comparand); */
6319 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6320 ins->dreg = alloc_ireg (cfg);
6321 ins->sreg1 = args [0]->dreg;
6322 ins->sreg2 = args [1]->dreg;
6323 ins->sreg3 = args [2]->dreg;
6324 ins->type = STACK_I4;
6325 MONO_ADD_INS (cfg->cbb, ins);
6327 /* bool result = r == comparand; */
6328 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6329 cmp->sreg1 = ins->dreg;
6330 cmp->sreg2 = args [2]->dreg;
6331 cmp->type = STACK_I4;
6332 MONO_ADD_INS (cfg->cbb, cmp);
6334 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6335 ceq->dreg = alloc_ireg (cfg);
6336 ceq->type = STACK_I4;
6337 MONO_ADD_INS (cfg->cbb, ceq);
6339 /* *success = result; */
6340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6342 cfg->has_atomic_cas_i4 = TRUE;
6344 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6345 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* ---- System.Threading.Volatile ---- */
6349 } else if (cmethod->klass->image == mono_defaults.corlib &&
6350 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6351 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6354 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6356 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6357 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6359 if (fsig->params [0]->type == MONO_TYPE_I1)
6360 opcode = OP_ATOMIC_LOAD_I1;
6361 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6362 opcode = OP_ATOMIC_LOAD_U1;
6363 else if (fsig->params [0]->type == MONO_TYPE_I2)
6364 opcode = OP_ATOMIC_LOAD_I2;
6365 else if (fsig->params [0]->type == MONO_TYPE_U2)
6366 opcode = OP_ATOMIC_LOAD_U2;
6367 else if (fsig->params [0]->type == MONO_TYPE_I4)
6368 opcode = OP_ATOMIC_LOAD_I4;
6369 else if (fsig->params [0]->type == MONO_TYPE_U4)
6370 opcode = OP_ATOMIC_LOAD_U4;
6371 else if (fsig->params [0]->type == MONO_TYPE_R4)
6372 opcode = OP_ATOMIC_LOAD_R4;
6373 else if (fsig->params [0]->type == MONO_TYPE_R8)
6374 opcode = OP_ATOMIC_LOAD_R8;
6375 #if SIZEOF_REGISTER == 8
6376 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6377 opcode = OP_ATOMIC_LOAD_I8;
6378 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6379 opcode = OP_ATOMIC_LOAD_U8;
6381 else if (fsig->params [0]->type == MONO_TYPE_I)
6382 opcode = OP_ATOMIC_LOAD_I4;
6383 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6384 opcode = OP_ATOMIC_LOAD_U4;
6388 if (!mono_arch_opcode_supported (opcode))
6391 MONO_INST_NEW (cfg, ins, opcode);
6392 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6393 ins->sreg1 = args [0]->dreg;
6394 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6395 MONO_ADD_INS (cfg->cbb, ins);
6397 switch (fsig->params [0]->type) {
6398 case MONO_TYPE_BOOLEAN:
6405 ins->type = STACK_I4;
6409 ins->type = STACK_I8;
6413 #if SIZEOF_REGISTER == 8
6414 ins->type = STACK_I8;
6416 ins->type = STACK_I4;
6421 ins->type = STACK_R8;
6424 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6425 ins->type = STACK_OBJ;
6431 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6433 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6435 if (fsig->params [0]->type == MONO_TYPE_I1)
6436 opcode = OP_ATOMIC_STORE_I1;
6437 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6438 opcode = OP_ATOMIC_STORE_U1;
6439 else if (fsig->params [0]->type == MONO_TYPE_I2)
6440 opcode = OP_ATOMIC_STORE_I2;
6441 else if (fsig->params [0]->type == MONO_TYPE_U2)
6442 opcode = OP_ATOMIC_STORE_U2;
6443 else if (fsig->params [0]->type == MONO_TYPE_I4)
6444 opcode = OP_ATOMIC_STORE_I4;
6445 else if (fsig->params [0]->type == MONO_TYPE_U4)
6446 opcode = OP_ATOMIC_STORE_U4;
6447 else if (fsig->params [0]->type == MONO_TYPE_R4)
6448 opcode = OP_ATOMIC_STORE_R4;
6449 else if (fsig->params [0]->type == MONO_TYPE_R8)
6450 opcode = OP_ATOMIC_STORE_R8;
6451 #if SIZEOF_REGISTER == 8
6452 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6453 opcode = OP_ATOMIC_STORE_I8;
6454 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6455 opcode = OP_ATOMIC_STORE_U8;
6457 else if (fsig->params [0]->type == MONO_TYPE_I)
6458 opcode = OP_ATOMIC_STORE_I4;
6459 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6460 opcode = OP_ATOMIC_STORE_U4;
6464 if (!mono_arch_opcode_supported (opcode))
6467 MONO_INST_NEW (cfg, ins, opcode);
6468 ins->dreg = args [0]->dreg;
6469 ins->sreg1 = args [1]->dreg;
6470 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6471 MONO_ADD_INS (cfg->cbb, ins);
6473 if (cfg->gen_write_barriers && is_ref)
6474 emit_write_barrier (cfg, args [0], args [1]);
/* ---- System.Diagnostics.Debugger ---- */
6480 } else if (cmethod->klass->image == mono_defaults.corlib &&
6481 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6482 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6483 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6484 if (should_insert_brekpoint (cfg->method)) {
6485 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6487 MONO_INST_NEW (cfg, ins, OP_NOP);
6488 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Environment ---- */
6492 } else if (cmethod->klass->image == mono_defaults.corlib &&
6493 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6494 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6495 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6497 EMIT_NEW_ICONST (cfg, ins, 1);
6499 EMIT_NEW_ICONST (cfg, ins, 0);
6502 } else if (cmethod->klass == mono_defaults.math_class) {
6504 * There is general branchless code for Min/Max, but it does not work for
6506 * http://everything2.com/?node_id=1051618
/* ---- MonoMac/monotouch ObjCRuntime.Selector.GetHandle ---- */
6508 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6509 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6510 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6511 !strcmp (cmethod->klass->name, "Selector")) {
6512 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6513 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6514 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6517 MonoJumpInfoToken *ji;
6520 cfg->disable_llvm = TRUE;
/* recover the ldstr token from the AOT const / GOT entry */
6522 if (args [0]->opcode == OP_GOT_ENTRY) {
6523 pi = args [0]->inst_p1;
6524 g_assert (pi->opcode == OP_PATCH_INFO);
6525 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6528 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6529 ji = args [0]->inst_p0;
6532 NULLIFY_INS (args [0]);
6535 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6536 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6537 ins->dreg = mono_alloc_ireg (cfg);
6539 ins->inst_p0 = mono_string_to_utf8 (s);
6540 MONO_ADD_INS (cfg->cbb, ins);
/* ---- fallbacks: SIMD, native types, LLVM, architecture specific ---- */
6546 #ifdef MONO_ARCH_SIMD_INTRINSICS
6547 if (cfg->opt & MONO_OPT_SIMD) {
6548 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6554 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6558 if (COMPILE_LLVM (cfg)) {
6559 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6564 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6568 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected icalls to managed implementations.  Currently only
 * String.InternalAllocateStr: when allocation profiling and shared code
 * are both off, replace it with a call to the GC's managed string
 * allocator (vtable + length).  NOTE(review): the #endif, the NULL check
 * of managed_alloc and the fallthrough return are among the lines omitted
 * from this extract.
 */
6571 inline static MonoInst*
6572 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6573 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6575 if (method->klass == mono_defaults.string_class) {
6576 /* managed string allocation support */
6577 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6578 MonoInst *iargs [2];
6579 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6580 MonoMethod *managed_alloc = NULL;
6582 g_assert (vtable); /* Should not fail since it is System.String */
6583 #ifndef MONO_CROSS_COMPILE
6584 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6588 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6589 iargs [1] = args [0];
6590 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the values on the stack SP into newly created local variables
 * which become the inlined callee's arguments (cfg->args [i]).  The
 * implicit 'this' argument (i == 0 when sig->hasthis) gets its type from
 * the stack entry itself; the rest come from sig->params.
 */
6597 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6599 MonoInst *store, *temp;
6602 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6603 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6606 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6607 * would be different than the MonoInst's used to represent arguments, and
6608 * the ldelema implementation can't deal with that.
6609 * Solution: When ldelema is used on an inline argument, create a var for
6610 * it, emit ldelema on that var, and emit the saving code below in
6611 * inline_method () if needed.
6613 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6614 cfg->args [i] = temp;
6615 /* This uses cfg->args [i] which is set by the preceding line */
6616 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6617 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when enabled, inlining can be restricted by method name via
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (useful for bisecting inlining-related bugs). */
6622 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6623 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6625 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE if CALLED_METHOD may be inlined, i.e. its full name starts
 * with the prefix given by MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The env
 * var is read once and cached in a function-local static.
 */
6627 check_inline_called_method_name_limit (MonoMethod *called_method)
6630 static const char *limit = NULL;
6632 if (limit == NULL) {
6633 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6635 if (limit_string != NULL)
6636 limit = limit_string;
/* An empty limit means "no restriction" (handled in elided lines) */
6641 if (limit [0] != '\0') {
6642 char *called_method_name = mono_method_full_name (called_method, TRUE);
6644 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6645 g_free (called_method_name);
6647 //return (strncmp_result <= 0);
6648 return (strncmp_result == 0);
6655 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Mirror of check_inline_called_method_name_limit () for the caller side:
 * return TRUE if CALLER_METHOD's full name starts with the prefix given by
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT (read once, cached in a static).
 */
6657 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6660 static const char *limit = NULL;
6662 if (limit == NULL) {
6663 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6664 if (limit_string != NULL) {
6665 limit = limit_string;
/* An empty limit means "no restriction" (handled in elided lines) */
6671 if (limit [0] != '\0') {
6672 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6674 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6675 g_free (caller_method_name);
6677 //return (strncmp_result <= 0);
6678 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG of type RTYPE to its zero value:
 * NULL for references/pointers, 0 for integers, 0.0 for R4/R8 (loaded from
 * static constants so they can be addressed), and VZERO for value types.
 */
6686 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6688 static double r8_0 = 0.0;
6689 static float r4_0 = 0.0;
6693 rtype = mini_get_underlying_type (cfg, rtype);
6697 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6698 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6699 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6700 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6701 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6702 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* Native r4: load 0.0f from the static float constant */
6703 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6704 ins->type = STACK_R4;
6705 ins->inst_p0 = (void*)&r4_0;
6707 MONO_ADD_INS (cfg->cbb, ins);
6708 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6709 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6710 ins->type = STACK_R8;
6711 ins->inst_p0 = (void*)&r8_0;
6713 MONO_ADD_INS (cfg->cbb, ins);
6714 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6715 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6716 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6717 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6718 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (objects etc.) is zeroed as a pointer */
6720 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Same type dispatch as emit_init_rvar (), but emits OP_DUMMY_* opcodes
 * which keep the IR valid (the vreg has a def) without generating any
 * machine code.  Falls back to a real init for types with no dummy form.
 */
6725 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6729 rtype = mini_get_underlying_type (cfg, rtype);
6733 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6734 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6735 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6736 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6737 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6738 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6739 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6740 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6741 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6742 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6743 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6744 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6745 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6746 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero-init instead */
6748 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.
 * If INIT is FALSE, emit dummy initialization statements to keep the IR valid.
 * Under soft-float a temporary register is initialized first and then
 * stored into the local, since the local cannot be written directly.
 */
6754 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6756 MonoInst *var = cfg->locals [local];
6757 if (COMPILE_SOFT_FLOAT (cfg)) {
6759 int reg = alloc_dreg (cfg, var->type);
6760 emit_init_rvar (cfg, reg, type);
6761 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
/* init == TRUE: real zero-init; otherwise dummy init (see above) */
6764 emit_init_rvar (cfg, var->dreg, type);
6766 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current method at IP, replacing the call with the
 * callee's IR.  Saves and restores the pieces of CFG state that
 * mono_method_to_ir () overwrites (locals, args, bblock maps, generic
 * context, ...), then either commits the inlined blocks (merging them into
 * the CFG and loading the return value) or rolls everything back if the
 * cost is too high or translation failed.
 * Return the cost of inlining CMETHOD.
 */
6776 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6777 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6779 MonoInst *ins, *rvar = NULL;
6780 MonoMethodHeader *cheader;
6781 MonoBasicBlock *ebblock, *sbblock;
6783 MonoMethod *prev_inlined_method;
6784 MonoInst **prev_locals, **prev_args;
6785 MonoType **prev_arg_types;
6786 guint prev_real_offset;
6787 GHashTable *prev_cbb_hash;
6788 MonoBasicBlock **prev_cil_offset_to_bb;
6789 MonoBasicBlock *prev_cbb;
6790 unsigned char* prev_cil_start;
6791 guint32 prev_cil_offset_to_bb_len;
6792 MonoMethod *prev_current_method;
6793 MonoGenericContext *prev_generic_context;
6794 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6796 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based debug filters, see the *_NAME_LIMIT env vars above */
6798 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6799 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6802 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6803 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6808 fsig = mono_method_signature (cmethod);
6810 if (cfg->verbose_level > 2)
6811 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6813 if (!cmethod->inline_info) {
6814 cfg->stat_inlineable_methods++;
6815 cmethod->inline_info = 1;
/* allocate local variables */
6819 cheader = mono_method_get_header (cmethod);
6821 if (cheader == NULL || mono_loader_get_last_error ()) {
6822 MonoLoaderError *error = mono_loader_get_last_error ();
6825 mono_metadata_free_mh (cheader);
6826 if (inline_always && error)
6827 mono_cfg_set_exception (cfg, error->exception_type);
6829 mono_loader_clear_error ();
/* Must verify before creating locals as it can cause the JIT to assert. */
6834 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6835 mono_metadata_free_mh (cheader);
/* allocate space to store the return value */
6840 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6841 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6844 prev_locals = cfg->locals;
6845 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6846 for (i = 0; i < cheader->num_locals; ++i)
6847 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
/* allocate start and end blocks */
/* This is needed so if the inline is aborted, we can clean up */
6851 NEW_BBLOCK (cfg, sbblock);
6852 sbblock->real_offset = real_offset;
6854 NEW_BBLOCK (cfg, ebblock);
6855 ebblock->block_num = cfg->num_bblocks++;
6856 ebblock->real_offset = real_offset;
/* Save the CFG state that mono_method_to_ir () will clobber */
6858 prev_args = cfg->args;
6859 prev_arg_types = cfg->arg_types;
6860 prev_inlined_method = cfg->inlined_method;
6861 cfg->inlined_method = cmethod;
6862 cfg->ret_var_set = FALSE;
6863 cfg->inline_depth ++;
6864 prev_real_offset = cfg->real_offset;
6865 prev_cbb_hash = cfg->cbb_hash;
6866 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6867 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6868 prev_cil_start = cfg->cil_start;
6869 prev_cbb = cfg->cbb;
6870 prev_current_method = cfg->current_method;
6871 prev_generic_context = cfg->generic_context;
6872 prev_ret_var_set = cfg->ret_var_set;
6873 prev_disable_inline = cfg->disable_inline;
6875 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Translate the callee's IL into IR between sbblock and ebblock */
6878 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6880 ret_var_set = cfg->ret_var_set;
/* Restore the saved CFG state */
6882 cfg->inlined_method = prev_inlined_method;
6883 cfg->real_offset = prev_real_offset;
6884 cfg->cbb_hash = prev_cbb_hash;
6885 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6886 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6887 cfg->cil_start = prev_cil_start;
6888 cfg->locals = prev_locals;
6889 cfg->args = prev_args;
6890 cfg->arg_types = prev_arg_types;
6891 cfg->current_method = prev_current_method;
6892 cfg->generic_context = prev_generic_context;
6893 cfg->ret_var_set = prev_ret_var_set;
6894 cfg->disable_inline = prev_disable_inline;
6895 cfg->inline_depth --;
/* Commit the inline if it was cheap enough (or forced) */
6897 if ((costs >= 0 && costs < 60) || inline_always) {
6898 if (cfg->verbose_level > 2)
6899 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6901 cfg->stat_inlined_methods++;
/* always add some code to avoid block split failures */
6904 MONO_INST_NEW (cfg, ins, OP_NOP);
6905 MONO_ADD_INS (prev_cbb, ins);
6907 prev_cbb->next_bb = sbblock;
6908 link_bblock (cfg, prev_cbb, sbblock);
6911 * Get rid of the begin and end bblocks if possible to aid local
6914 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6916 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6917 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6919 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6920 MonoBasicBlock *prev = ebblock->in_bb [0];
6921 mono_merge_basic_blocks (cfg, prev, ebblock);
6923 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6924 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6925 cfg->cbb = prev_cbb;
6929 * It's possible that the rvar is set in some prev bblock, but not in others.
6935 for (i = 0; i < ebblock->in_count; ++i) {
6936 bb = ebblock->in_bb [i];
6938 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6941 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6950 *out_cbb = cfg->cbb;
6954 * If the inlined method contains only a throw, then the ret var is not
6955 * set, so set it to a dummy value.
6958 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6960 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6963 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: roll back, leaving the CFG as it was before the attempt */
6966 if (cfg->verbose_level > 2)
6967 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6968 cfg->exception_type = MONO_EXCEPTION_NONE;
6969 mono_loader_clear_error ();
/* This gets rid of the newly added bblocks */
6972 cfg->cbb = prev_cbb;
6974 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6979 * Some of these comments may well be out-of-date.
6980 * Design decisions: we do a single pass over the IL code (and we do bblock
6981 * splitting/merging in the few cases when it's required: a back jump to an IL
6982 * address that was not already seen as bblock starting point).
6983 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6984 * Complex operations are decomposed in simpler ones right away. We need to let the
6985 * arch-specific code peek and poke inside this process somehow (except when the
6986 * optimizations can take advantage of the full semantic info of coarse opcodes).
6987 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6988 * MonoInst->opcode initially is the IL opcode or some simplification of that
6989 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6990 * opcode with value bigger than OP_LAST.
6991 * At this point the IR can be handed over to an interpreter, a dumb code generator
6992 * or to the optimizing code generator that will translate it to SSA form.
6994 * Profiling directed optimizations.
6995 * We may compile by default with few or no optimizations and instrument the code
6996 * or the user may indicate what methods to optimize the most either in a config file
6997 * or through repeated runs where the compiler applies offline the optimizations to
6998 * each method and then decides if it was worth it.
/* Lightweight verification helpers used throughout mono_method_to_ir ():
 * each macro bails out via UNVERIFIED / TYPE_LOAD_ERROR (defined elsewhere)
 * when the IL being translated is malformed. */
7001 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7002 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7003 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7004 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7005 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7006 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7007 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7008 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
/* offset from br.s -> br like opcodes */
7011 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL offset IP belongs to basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
7014 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7016 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7018 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) once, creating a basic block (via
 * GET_BBLOCK) at every branch target and at every instruction following a
 * branch/switch.  Blocks that contain a 'throw' are marked out-of-line so
 * later passes can move them off the hot path.
 * NOTE(review): several case labels and loop-advance lines are elided in
 * this view; the full switch handles every MonoOpcode argument kind.
 */
7022 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7024 unsigned char *ip = start;
7025 unsigned char *target;
7028 MonoBasicBlock *bblock;
7029 const MonoOpcode *opcode;
7032 cli_addr = ip - start;
7033 i = mono_opcode_value ((const guint8 **)&ip, end);
7036 opcode = &mono_opcodes [i];
/* Advance IP by the operand size of each opcode kind */
7037 switch (opcode->argument) {
7038 case MonoInlineNone:
7041 case MonoInlineString:
7042 case MonoInlineType:
7043 case MonoInlineField:
7044 case MonoInlineMethod:
7047 case MonoShortInlineR:
7054 case MonoShortInlineVar:
7055 case MonoShortInlineI:
7058 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
7059 target = start + cli_addr + 2 + (signed char)ip [1];
7060 GET_BBLOCK (cfg, bblock, target);
7063 GET_BBLOCK (cfg, bblock, ip);
7065 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
7066 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7067 GET_BBLOCK (cfg, bblock, target);
7070 GET_BBLOCK (cfg, bblock, ip);
7072 case MonoInlineSwitch: {
7073 guint32 n = read32 (ip + 1);
/* The fall-through target follows the n-entry jump table */
7076 cli_addr += 5 + 4 * n;
7077 target = start + cli_addr;
7078 GET_BBLOCK (cfg, bblock, target);
7080 for (j = 0; j < n; ++j) {
7081 target = start + cli_addr + (gint32)read32 (ip);
7082 GET_BBLOCK (cfg, bblock, target);
7092 g_assert_not_reached ();
7095 if (i == CEE_THROW) {
7096 unsigned char *bb_start = ip - 1;
/* Find the start of the bblock containing the throw */
7100 while ((bb_start >= start) && !bblock) {
7101 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Mark throwing blocks as cold so codegen can move them out of line */
7105 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in method M to a MonoMethod.  For wrappers the method is
 * looked up in the wrapper data (and inflated with CONTEXT when generic);
 * otherwise it is loaded through the image's metadata.  Open constructed
 * types are allowed (see mini_get_method () for the checked variant).
 */
7115 static inline MonoMethod *
7116 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7120 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7121 method = mono_method_get_wrapper_data (m, token);
7124 method = mono_class_inflate_generic_method_checked (method, context, &error);
7125 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7128 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared code
 * a method on an open constructed type is rejected (the elided branch
 * presumably returns NULL — confirm against the full source).
 */
7134 static inline MonoMethod *
7135 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7137 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7139 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in METHOD to a MonoClass, honoring wrapper data and
 * inflating with CONTEXT for generic code, then initialize the class.
 */
7145 static inline MonoClass*
7146 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7151 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7152 klass = mono_method_get_wrapper_data (method, token);
7154 klass = mono_class_inflate_generic_class (klass, context);
7156 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7157 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7160 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in METHOD to a MonoMethodSignature, either from the
 * wrapper data (inflated with CONTEXT when generic) or by parsing the
 * signature blob from the image's metadata.
 */
7164 static inline MonoMethodSignature*
7165 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7167 MonoMethodSignature *fsig;
7169 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7172 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7174 fsig = mono_inflate_generic_signature (fsig, context, &error);
7176 g_assert (mono_error_ok (&error));
7179 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands when CALLER calls CALLEE.
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 * On an ECMA link demand, code throwing a SecurityException is emitted
 * before the call; other failures set a pending exception on CFG.
 */
7189 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7193 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7197 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7198 if (result == MONO_JIT_SECURITY_OK)
7201 if (result == MONO_JIT_LINKDEMAND_ECMA) {
/* Generate code to throw a SecurityException before the actual call/link */
7203 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7206 NEW_ICONST (cfg, args [0], 4);
7207 NEW_METHODCONST (cfg, args [1], caller);
7208 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7209 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
/* don't hide previous results */
7211 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7212 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching) the managed
 * SecurityManager.ThrowException (exception) helper method.
 */
7220 throw_exception (void)
7222 static MonoMethod *method = NULL;
7225 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7226 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () that throws the
 * pre-built exception object EX at runtime.
 */
7233 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7235 MonoMethod *thrower = throw_exception ();
7238 EMIT_NEW_PCONST (cfg, args [0], ex);
7239 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 *   Return the original method if a wrapper is specified.  We can only
 * access the custom attributes from the original method.  Returns the
 * method unchanged for non-wrappers, and NULL-equivalent handling for
 * native-to-managed wrappers happens in the elided branch body.
 */
7247 get_original_method (MonoMethod *method)
7249 if (method->wrapper_type == MONO_WRAPPER_NONE)
/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7253 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
/* in other cases we need to find the original method */
7257 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the resulting security exception at the access site.
 */
7261 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7262 MonoBasicBlock *bblock, unsigned char *ip)
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
7265 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7267 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the resulting security exception at the call site.
 */
7271 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7272 MonoBasicBlock *bblock, unsigned char *ip)
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
7275 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7277 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at ip are the array initialization
 * sequence (dup; ldtoken <field>; call RuntimeHelpers::InitializeArray)
 * and return the pointer to the data and the size.  For AOT compilation
 * the field RVA is returned instead so the lookup happens at load time.
 */
7285 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7288 * newarr[System.Int32]
7290 * ldtoken field valuetype ...
7291 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand is a Field token (0x04xxxxxx) */
7293 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7295 guint32 token = read32 (ip + 7);
7296 guint32 field_token = read32 (ip + 2);
7297 guint32 field_index = field_token & 0xffffff;
7299 const char *data_ptr;
7301 MonoMethod *cmethod;
7302 MonoClass *dummy_class;
7303 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7307 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7311 *out_field_token = field_token;
7313 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only recognize corlib's RuntimeHelpers.InitializeArray */
7316 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7318 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7319 case MONO_TYPE_BOOLEAN:
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7324 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7325 case MONO_TYPE_CHAR:
/* Reject if the computed blob size exceeds the field's declared size */
7342 if (size > mono_type_size (field->type, &dummy_align))
7345 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7346 if (!image_is_dynamic (method->klass->image)) {
7347 field_index = read32 (ip + 2) & 0xffffff;
7348 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7349 data_ptr = mono_image_rva_map (method->klass->image, rva);
7350 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
/* for aot code we do the lookup on load */
7352 if (aot && data_ptr)
7353 return GUINT_TO_POINTER (rva);
/* FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7357 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message that names
 * METHOD and disassembles the offending instruction at IP (or notes an
 * empty method body).  The header is queued for later freeing.
 */
7365 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7367 char *method_fname = mono_method_full_name (method, TRUE);
7369 MonoMethodHeader *header = mono_method_get_header (method);
7371 if (header->code_size == 0)
7372 method_code = g_strdup ("method body is empty.");
7374 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7375 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7376 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7377 g_free (method_fname);
7378 g_free (method_code);
7379 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG, registering it as
 * a GC root so it survives until the failure is reported.
 */
7383 set_exception_object (MonoCompile *cfg, MonoException *exception)
7385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7386 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7387 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N.  When the value is a
 * freshly-emitted constant, retarget its dreg directly at the local to
 * avoid a redundant reg-reg move.
 */
7391 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7394 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7395 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7396 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
/* Optimize reg-reg moves away */
7399 * Can't optimize other opcodes, since sp[0] might point to
7400 * the last ins of a decomposed opcode.
7402 sp [0]->dreg = (cfg)->locals [n]->dreg;
7404 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations so try to get rid of it in common
 * cases.  The visible case handles 'ldloca N; initobj <type>' by emitting
 * a direct local initialization instead of taking the local's address.
 * Returns the new IP past the consumed sequence (elided in this view).
 */
7412 static inline unsigned char *
7413 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7423 local = read16 (ip + 2);
7427 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
/* From the INITOBJ case */
7429 token = read32 (ip + 2);
7430 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7431 CHECK_TYPELOAD (klass);
7432 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7433 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return TRUE if CLASS is System.Exception or derives from it, by
 * walking the parent chain.
 */
7441 is_exception_class (MonoClass *class)
7444 if (class == mono_defaults.exception_class)
7446 class = class->parent;
/*
 * is_jit_optimizer_disabled:
 *
 * Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.  The result is cached per assembly
 * (jit_optimizer_disabled / _inited), published with a memory barrier.
 */
7458 is_jit_optimizer_disabled (MonoMethod *m)
7460 MonoAssembly *ass = m->klass->image->assembly;
7461 MonoCustomAttrInfo* attrs;
7462 static MonoClass *klass;
7464 gboolean val = FALSE;
7467 if (ass->jit_optimizer_disabled_inited)
7468 return ass->jit_optimizer_disabled;
7471 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute class available: optimizer stays enabled */
7474 ass->jit_optimizer_disabled = FALSE;
7475 mono_memory_barrier ();
7476 ass->jit_optimizer_disabled_inited = TRUE;
7480 attrs = mono_custom_attrs_from_assembly (ass);
7482 for (i = 0; i < attrs->num_attrs; ++i) {
7483 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7485 MonoMethodSignature *sig;
7487 if (!attr->ctor || attr->ctor->klass != klass)
/* Decode the attribute. See reflection.c */
7490 p = (const char*)attr->data;
7491 g_assert (read16 (p) == 0x0001);
/* FIXME: Support named parameters */
7495 sig = mono_method_signature (attr->ctor);
7496 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
/* Two boolean arguments */
7502 mono_custom_attrs_free (attrs);
/* Publish the cached result: value first, barrier, then the inited flag */
7505 ass->jit_optimizer_disabled = val;
7506 mono_memory_barrier ();
7507 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return TRUE if the call from METHOD to CMETHOD (opcode CALL_OPCODE,
 * signature FSIG) can be compiled as a real tail call.  Starts from the
 * arch-specific answer, then vetoes the cases where callee arguments or
 * 'this' could point into the current frame, pinvokes, LMF-saving
 * methods, most wrappers, and any opcode other than plain CEE_CALL.
 */
7513 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7515 gboolean supported_tail_call;
7518 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7519 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7521 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7524 for (i = 0; i < fsig->param_count; ++i) {
7525 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
/* These can point to the current method's stack */
7527 supported_tail_call = FALSE;
7529 if (fsig->hasthis && cmethod->klass->valuetype)
/* this might point to the current method's stack */
7531 supported_tail_call = FALSE;
7532 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7533 supported_tail_call = FALSE;
7534 if (cfg->method->save_lmf)
7535 supported_tail_call = FALSE;
7536 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7537 supported_tail_call = FALSE;
7538 if (call_opcode != CEE_CALL)
7539 supported_tail_call = FALSE;
/* Debugging support */
7543 if (supported_tail_call) {
7544 if (!mono_debug_count ())
7545 supported_tail_call = FALSE;
7549 return supported_tail_call;
/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
 * it to the thread local value based on the tls_offset field. Every other kind of access to
 * the field causes an assert.
 */
/*
 * is_magic_tls_access:
 *
 *   Return TRUE iff FIELD is corlib's ThreadLocal`1.tlsdata field.
 */
7557 is_magic_tls_access (MonoClassField *field)
7559 if (strcmp (field->name, "tlsdata"))
7561 if (strcmp (field->parent->name, "ThreadLocal`1"))
7563 return field->parent->image == mono_defaults.corlib;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
7572 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7575 int static_data_reg, array_reg, dreg;
7576 int offset2_reg, idx_reg;
// inlined access to the tls data
// idx = (offset >> 24) - 1;
// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7580 static_data_reg = alloc_ireg (cfg);
7581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, scaled to a pointer-sized index */
7582 idx_reg = alloc_ireg (cfg);
7583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7586 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array = thread->static_data [idx] */
7587 array_reg = alloc_ireg (cfg);
7588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* addr = array + (offset & 0xffffff) */
7589 offset2_reg = alloc_ireg (cfg);
7590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7591 dreg = alloc_ireg (cfg);
7592 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
/*
 * create_magic_tls_access:
 *
 *   Redirect access to the tlsdata field to the tls var given by the
 * tls_offset field.  This address is cached per-method in cached_tls_addr
 * (stored in a temp local) so repeated accesses reuse the computed address.
 */
7601 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7603 MonoInst *load, *addr, *temp, *store, *thread_ins;
7604 MonoClassField *offset_field;
/* Fast path: address already computed earlier in this method */
7606 if (*cached_tls_addr) {
7607 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7610 thread_ins = mono_get_thread_intrinsic (cfg);
7611 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7613 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7615 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic: fall back to calling CurrentInternalThread_internal */
7617 MonoMethod *thread_method;
7618 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7619 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7621 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7622 addr->klass = mono_class_from_mono_type (tls_field->type);
7623 addr->type = STACK_MP;
/* Cache the computed address in a temp for subsequent accesses */
7624 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7625 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7627 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
/*
 * handle_ctor_call:
 *
 * Handle calls made to ctors from NEWOBJ opcodes.
 *
 * REF_BBLOCK will point to the current bblock after the call.
 * Picks, in order: a ctor intrinsic, inlining, a gsharedvt indirect call,
 * an rgctx-based indirect call for unpatchable generic code, or a plain
 * direct/virtual call.  For generic-shared value type ctors a vtable (or
 * method rgctx) argument is materialized first.
 */
7639 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7640 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7642 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7643 MonoBasicBlock *bblock = *ref_bblock;
7645 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7646 mono_method_is_generic_sharable (cmethod, TRUE)) {
7647 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7648 mono_class_vtable (cfg->domain, cmethod->klass);
7649 CHECK_TYPELOAD (cmethod->klass);
7651 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7652 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7655 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7656 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Not context-dependent: use the concrete vtable directly */
7658 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7660 CHECK_TYPELOAD (cmethod->klass);
7661 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Avoid virtual calls to ctors if possible */
7667 if (mono_class_is_marshalbyref (cmethod->klass))
7668 callvirt_this_arg = sp [0];
7670 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7671 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7672 CHECK_CFG_EXCEPTION;
7673 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7674 mono_method_check_inlining (cfg, cmethod) &&
7675 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
/* Inline the ctor; the 5-byte adjustments account for the call opcode size */
7678 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7679 cfg->real_offset += 5;
7681 *inline_costs += costs - 5;
7682 *ref_bblock = bblock;
7684 INLINE_FAILURE ("inline failure");
// FIXME-VT: Clean this up
7686 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7687 GSHAREDVT_FAILURE(*ip);
7688 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7690 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
/* gsharedvt: call through the out trampoline fetched from the rgctx */
7693 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7694 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7695 } else if (context_used &&
7696 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7697 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7698 MonoInst *cmethod_addr;
/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7702 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7703 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7705 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: direct (or callvirt for MBR classes) call to the ctor */
7707 INLINE_FAILURE ("ctor call");
7708 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7709 callvirt_this_arg, NULL, vtable_arg);
7716 * mono_method_to_ir:
7718 * Translate the .NET IL into linear IR.
7721 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7722 MonoInst *return_var, MonoInst **inline_args,
7723 guint inline_offset, gboolean is_virtual_call)
7726 MonoInst *ins, **sp, **stack_start;
7727 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7728 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7729 MonoMethod *cmethod, *method_definition;
7730 MonoInst **arg_array;
7731 MonoMethodHeader *header;
7733 guint32 token, ins_flag;
7735 MonoClass *constrained_class = NULL;
7736 unsigned char *ip, *end, *target, *err_pos;
7737 MonoMethodSignature *sig;
7738 MonoGenericContext *generic_context = NULL;
7739 MonoGenericContainer *generic_container = NULL;
7740 MonoType **param_types;
7741 int i, n, start_new_bblock, dreg;
7742 int num_calls = 0, inline_costs = 0;
7743 int breakpoint_id = 0;
7745 MonoBoolean security, pinvoke;
7746 MonoSecurityManager* secman = NULL;
7747 MonoDeclSecurityActions actions;
7748 GSList *class_inits = NULL;
7749 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7751 gboolean init_locals, seq_points, skip_dead_blocks;
7752 gboolean sym_seq_points = FALSE;
7753 MonoInst *cached_tls_addr = NULL;
7754 MonoDebugMethodInfo *minfo;
7755 MonoBitSet *seq_point_locs = NULL;
7756 MonoBitSet *seq_point_set_locs = NULL;
7758 cfg->disable_inline = is_jit_optimizer_disabled (method);
7760 /* serialization and xdomain stuff may need access to private fields and methods */
7761 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7762 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7763 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7764 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7765 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7766 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7768 dont_verify |= mono_security_smcs_hack_enabled ();
7770 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7771 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7772 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7773 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7774 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7776 image = method->klass->image;
7777 header = mono_method_get_header (method);
7779 MonoLoaderError *error;
7781 if ((error = mono_loader_get_last_error ())) {
7782 mono_cfg_set_exception (cfg, error->exception_type);
7784 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7785 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7787 goto exception_exit;
7789 generic_container = mono_method_get_generic_container (method);
7790 sig = mono_method_signature (method);
7791 num_args = sig->hasthis + sig->param_count;
7792 ip = (unsigned char*)header->code;
7793 cfg->cil_start = ip;
7794 end = ip + header->code_size;
7795 cfg->stat_cil_code_size += header->code_size;
7797 seq_points = cfg->gen_seq_points && cfg->method == method;
7798 #ifdef PLATFORM_ANDROID
7799 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7802 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7803 /* We could hit a seq point before attaching to the JIT (#8338) */
7807 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7808 minfo = mono_debug_lookup_method (method);
7810 int i, n_il_offsets;
7814 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7815 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7816 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7817 sym_seq_points = TRUE;
7818 for (i = 0; i < n_il_offsets; ++i) {
7819 if (il_offsets [i] < header->code_size)
7820 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7822 g_free (il_offsets);
7823 g_free (line_numbers);
7824 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7825 /* Methods without line number info like auto-generated property accessors */
7826 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7827 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7828 sym_seq_points = TRUE;
7833 * Methods without init_locals set could cause asserts in various passes
7834 * (#497220). To work around this, we emit dummy initialization opcodes
7835 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7836 * on some platforms.
7838 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7839 init_locals = header->init_locals;
7843 method_definition = method;
7844 while (method_definition->is_inflated) {
7845 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7846 method_definition = imethod->declaring;
7849 /* SkipVerification is not allowed if core-clr is enabled */
7850 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7852 dont_verify_stloc = TRUE;
7855 if (sig->is_inflated)
7856 generic_context = mono_method_get_context (method);
7857 else if (generic_container)
7858 generic_context = &generic_container->context;
7859 cfg->generic_context = generic_context;
7861 if (!cfg->generic_sharing_context)
7862 g_assert (!sig->has_type_parameters);
7864 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7865 g_assert (method->is_inflated);
7866 g_assert (mono_method_get_context (method)->method_inst);
7868 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7869 g_assert (sig->generic_param_count);
7871 if (cfg->method == method) {
7872 cfg->real_offset = 0;
7874 cfg->real_offset = inline_offset;
7877 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7878 cfg->cil_offset_to_bb_len = header->code_size;
7880 cfg->current_method = method;
7882 if (cfg->verbose_level > 2)
7883 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7885 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7887 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7888 for (n = 0; n < sig->param_count; ++n)
7889 param_types [n + sig->hasthis] = sig->params [n];
7890 cfg->arg_types = param_types;
7892 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7893 if (cfg->method == method) {
7895 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7896 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7899 NEW_BBLOCK (cfg, start_bblock);
7900 cfg->bb_entry = start_bblock;
7901 start_bblock->cil_code = NULL;
7902 start_bblock->cil_length = 0;
7903 #if defined(__native_client_codegen__)
7904 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7905 ins->dreg = alloc_dreg (cfg, STACK_I4);
7906 MONO_ADD_INS (start_bblock, ins);
7910 NEW_BBLOCK (cfg, end_bblock);
7911 cfg->bb_exit = end_bblock;
7912 end_bblock->cil_code = NULL;
7913 end_bblock->cil_length = 0;
7914 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7915 g_assert (cfg->num_bblocks == 2);
7917 arg_array = cfg->args;
7919 if (header->num_clauses) {
7920 cfg->spvars = g_hash_table_new (NULL, NULL);
7921 cfg->exvars = g_hash_table_new (NULL, NULL);
7923 /* handle exception clauses */
7924 for (i = 0; i < header->num_clauses; ++i) {
7925 MonoBasicBlock *try_bb;
7926 MonoExceptionClause *clause = &header->clauses [i];
7927 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7928 try_bb->real_offset = clause->try_offset;
7929 try_bb->try_start = TRUE;
7930 try_bb->region = ((i + 1) << 8) | clause->flags;
7931 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7932 tblock->real_offset = clause->handler_offset;
7933 tblock->flags |= BB_EXCEPTION_HANDLER;
7936 * Linking the try block with the EH block hinders inlining as we won't be able to
7937 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7939 if (COMPILE_LLVM (cfg))
7940 link_bblock (cfg, try_bb, tblock);
7942 if (*(ip + clause->handler_offset) == CEE_POP)
7943 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7945 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7946 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7947 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7948 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7949 MONO_ADD_INS (tblock, ins);
7951 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7952 /* finally clauses already have a seq point */
7953 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7954 MONO_ADD_INS (tblock, ins);
7957 /* todo: is a fault block unsafe to optimize? */
7958 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7959 tblock->flags |= BB_EXCEPTION_UNSAFE;
7963 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7965 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7967 /* catch and filter blocks get the exception object on the stack */
7968 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7969 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7970 MonoInst *dummy_use;
7972 /* mostly like handle_stack_args (), but just sets the input args */
7973 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7974 tblock->in_scount = 1;
7975 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7976 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7979 * Add a dummy use for the exvar so its liveness info will be
7983 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7985 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7986 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7987 tblock->flags |= BB_EXCEPTION_HANDLER;
7988 tblock->real_offset = clause->data.filter_offset;
7989 tblock->in_scount = 1;
7990 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7991 /* The filter block shares the exvar with the handler block */
7992 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7993 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7994 MONO_ADD_INS (tblock, ins);
7998 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7999 clause->data.catch_class &&
8000 cfg->generic_sharing_context &&
8001 mono_class_check_context_used (clause->data.catch_class)) {
8003 * In shared generic code with catch
8004 * clauses containing type variables
8005 * the exception handling code has to
8006 * be able to get to the rgctx.
8007 * Therefore we have to make sure that
8008 * the vtable/mrgctx argument (for
8009 * static or generic methods) or the
8010 * "this" argument (for non-static
8011 * methods) are live.
8013 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8014 mini_method_get_context (method)->method_inst ||
8015 method->klass->valuetype) {
8016 mono_get_vtable_var (cfg);
8018 MonoInst *dummy_use;
8020 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8025 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8026 cfg->cbb = start_bblock;
8027 cfg->args = arg_array;
8028 mono_save_args (cfg, sig, inline_args);
8031 /* FIRST CODE BLOCK */
8032 NEW_BBLOCK (cfg, bblock);
8033 bblock->cil_code = ip;
8037 ADD_BBLOCK (cfg, bblock);
8039 if (cfg->method == method) {
8040 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8041 if (breakpoint_id) {
8042 MONO_INST_NEW (cfg, ins, OP_BREAK);
8043 MONO_ADD_INS (bblock, ins);
8047 if (mono_security_cas_enabled ())
8048 secman = mono_security_manager_get_methods ();
8050 security = (secman && mono_security_method_has_declsec (method));
8051 /* at this point having security doesn't mean we have any code to generate */
8052 if (security && (cfg->method == method)) {
8053 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8054 * And we do not want to enter the next section (with allocation) if we
8055 * have nothing to generate */
8056 security = mono_declsec_get_demands (method, &actions);
8059 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8060 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8062 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8063 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8064 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8066 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
8067 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8071 mono_custom_attrs_free (custom);
8074 custom = mono_custom_attrs_from_class (wrapped->klass);
8075 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8079 mono_custom_attrs_free (custom);
8082 /* not a P/Invoke after all */
8087 /* we use a separate basic block for the initialization code */
8088 NEW_BBLOCK (cfg, init_localsbb);
8089 cfg->bb_init = init_localsbb;
8090 init_localsbb->real_offset = cfg->real_offset;
8091 start_bblock->next_bb = init_localsbb;
8092 init_localsbb->next_bb = bblock;
8093 link_bblock (cfg, start_bblock, init_localsbb);
8094 link_bblock (cfg, init_localsbb, bblock);
8096 cfg->cbb = init_localsbb;
8098 if (cfg->gsharedvt && cfg->method == method) {
8099 MonoGSharedVtMethodInfo *info;
8100 MonoInst *var, *locals_var;
8103 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8104 info->method = cfg->method;
8105 info->count_entries = 16;
8106 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8107 cfg->gsharedvt_info = info;
8109 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8110 /* prevent it from being register allocated */
8111 //var->flags |= MONO_INST_VOLATILE;
8112 cfg->gsharedvt_info_var = var;
8114 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8115 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8117 /* Allocate locals */
8118 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8119 /* prevent it from being register allocated */
8120 //locals_var->flags |= MONO_INST_VOLATILE;
8121 cfg->gsharedvt_locals_var = locals_var;
8123 dreg = alloc_ireg (cfg);
8124 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8126 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8127 ins->dreg = locals_var->dreg;
8129 MONO_ADD_INS (cfg->cbb, ins);
8130 cfg->gsharedvt_locals_var_ins = ins;
8132 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8135 ins->flags |= MONO_INST_INIT;
8139 /* at this point we know, if security is TRUE, that some code needs to be generated */
8140 if (security && (cfg->method == method)) {
8143 cfg->stat_cas_demand_generation++;
8145 if (actions.demand.blob) {
8146 /* Add code for SecurityAction.Demand */
8147 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8148 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8149 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8150 mono_emit_method_call (cfg, secman->demand, args, NULL);
8152 if (actions.noncasdemand.blob) {
8153 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8154 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8155 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8156 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8157 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8158 mono_emit_method_call (cfg, secman->demand, args, NULL);
8160 if (actions.demandchoice.blob) {
8161 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8162 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8163 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8164 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8165 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8169 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8171 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8174 if (mono_security_core_clr_enabled ()) {
8175 /* check if this is native code, e.g. an icall or a p/invoke */
8176 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8177 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8179 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8180 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8182 /* if this ia a native call then it can only be JITted from platform code */
8183 if ((icall || pinvk) && method->klass && method->klass->image) {
8184 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8185 MonoException *ex = icall ? mono_get_exception_security () :
8186 mono_get_exception_method_access ();
8187 emit_throw_exception (cfg, ex);
8194 CHECK_CFG_EXCEPTION;
8196 if (header->code_size == 0)
8199 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8204 if (cfg->method == method)
8205 mono_debug_init_method (cfg, bblock, breakpoint_id);
8207 for (n = 0; n < header->num_locals; ++n) {
8208 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8213 /* We force the vtable variable here for all shared methods
8214 for the possibility that they might show up in a stack
8215 trace where their exact instantiation is needed. */
8216 if (cfg->generic_sharing_context && method == cfg->method) {
8217 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8218 mini_method_get_context (method)->method_inst ||
8219 method->klass->valuetype) {
8220 mono_get_vtable_var (cfg);
8222 /* FIXME: Is there a better way to do this?
8223 We need the variable live for the duration
8224 of the whole method. */
8225 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8229 /* add a check for this != NULL to inlined methods */
8230 if (is_virtual_call) {
8233 NEW_ARGLOAD (cfg, arg_ins, 0);
8234 MONO_ADD_INS (cfg->cbb, arg_ins);
8235 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8238 skip_dead_blocks = !dont_verify;
8239 if (skip_dead_blocks) {
8240 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8245 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8246 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8249 start_new_bblock = 0;
8252 if (cfg->method == method)
8253 cfg->real_offset = ip - header->code;
8255 cfg->real_offset = inline_offset;
8260 if (start_new_bblock) {
8261 bblock->cil_length = ip - bblock->cil_code;
8262 if (start_new_bblock == 2) {
8263 g_assert (ip == tblock->cil_code);
8265 GET_BBLOCK (cfg, tblock, ip);
8267 bblock->next_bb = tblock;
8270 start_new_bblock = 0;
8271 for (i = 0; i < bblock->in_scount; ++i) {
8272 if (cfg->verbose_level > 3)
8273 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8274 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8278 g_slist_free (class_inits);
8281 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8282 link_bblock (cfg, bblock, tblock);
8283 if (sp != stack_start) {
8284 handle_stack_args (cfg, stack_start, sp - stack_start);
8286 CHECK_UNVERIFIABLE (cfg);
8288 bblock->next_bb = tblock;
8291 for (i = 0; i < bblock->in_scount; ++i) {
8292 if (cfg->verbose_level > 3)
8293 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8294 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8297 g_slist_free (class_inits);
8302 if (skip_dead_blocks) {
8303 int ip_offset = ip - header->code;
8305 if (ip_offset == bb->end)
8309 int op_size = mono_opcode_size (ip, end);
8310 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8312 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8314 if (ip_offset + op_size == bb->end) {
8315 MONO_INST_NEW (cfg, ins, OP_NOP);
8316 MONO_ADD_INS (bblock, ins);
8317 start_new_bblock = 1;
8325 * Sequence points are points where the debugger can place a breakpoint.
8326 * Currently, we generate these automatically at points where the IL
8329 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8331 * Make methods interruptable at the beginning, and at the targets of
8332 * backward branches.
8333 * Also, do this at the start of every bblock in methods with clauses too,
8334 * to be able to handle instructions with inprecise control flow like
8336 * Backward branches are handled at the end of method-to-ir ().
8338 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8340 /* Avoid sequence points on empty IL like .volatile */
8341 // FIXME: Enable this
8342 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8343 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8344 if (sp != stack_start)
8345 ins->flags |= MONO_INST_NONEMPTY_STACK;
8346 MONO_ADD_INS (cfg->cbb, ins);
8349 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8352 bblock->real_offset = cfg->real_offset;
8354 if ((cfg->method == method) && cfg->coverage_info) {
8355 guint32 cil_offset = ip - header->code;
8356 cfg->coverage_info->data [cil_offset].cil_code = ip;
8358 /* TODO: Use an increment here */
8359 #if defined(TARGET_X86)
8360 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8361 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8363 MONO_ADD_INS (cfg->cbb, ins);
8365 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8366 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8370 if (cfg->verbose_level > 3)
8371 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8375 if (seq_points && !sym_seq_points && sp != stack_start) {
8377 * The C# compiler uses these nops to notify the JIT that it should
8378 * insert seq points.
8380 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8381 MONO_ADD_INS (cfg->cbb, ins);
8383 if (cfg->keep_cil_nops)
8384 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8386 MONO_INST_NEW (cfg, ins, OP_NOP);
8388 MONO_ADD_INS (bblock, ins);
8391 if (should_insert_brekpoint (cfg->method)) {
8392 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8394 MONO_INST_NEW (cfg, ins, OP_NOP);
8397 MONO_ADD_INS (bblock, ins);
8403 CHECK_STACK_OVF (1);
8404 n = (*ip)-CEE_LDARG_0;
8406 EMIT_NEW_ARGLOAD (cfg, ins, n);
8414 CHECK_STACK_OVF (1);
8415 n = (*ip)-CEE_LDLOC_0;
8417 EMIT_NEW_LOCLOAD (cfg, ins, n);
8426 n = (*ip)-CEE_STLOC_0;
8429 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8431 emit_stloc_ir (cfg, sp, header, n);
8438 CHECK_STACK_OVF (1);
8441 EMIT_NEW_ARGLOAD (cfg, ins, n);
8447 CHECK_STACK_OVF (1);
8450 NEW_ARGLOADA (cfg, ins, n);
8451 MONO_ADD_INS (cfg->cbb, ins);
8461 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8463 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8468 CHECK_STACK_OVF (1);
8471 EMIT_NEW_LOCLOAD (cfg, ins, n);
8475 case CEE_LDLOCA_S: {
8476 unsigned char *tmp_ip;
8478 CHECK_STACK_OVF (1);
8479 CHECK_LOCAL (ip [1]);
8481 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8487 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8496 CHECK_LOCAL (ip [1]);
8497 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8499 emit_stloc_ir (cfg, sp, header, ip [1]);
8504 CHECK_STACK_OVF (1);
8505 EMIT_NEW_PCONST (cfg, ins, NULL);
8506 ins->type = STACK_OBJ;
8511 CHECK_STACK_OVF (1);
8512 EMIT_NEW_ICONST (cfg, ins, -1);
8525 CHECK_STACK_OVF (1);
8526 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8532 CHECK_STACK_OVF (1);
8534 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8540 CHECK_STACK_OVF (1);
8541 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8547 CHECK_STACK_OVF (1);
8548 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8549 ins->type = STACK_I8;
8550 ins->dreg = alloc_dreg (cfg, STACK_I8);
8552 ins->inst_l = (gint64)read64 (ip);
8553 MONO_ADD_INS (bblock, ins);
8559 gboolean use_aotconst = FALSE;
8561 #ifdef TARGET_POWERPC
8562 /* FIXME: Clean this up */
8563 if (cfg->compile_aot)
8564 use_aotconst = TRUE;
8567 /* FIXME: we should really allocate this only late in the compilation process */
8568 f = mono_domain_alloc (cfg->domain, sizeof (float));
8570 CHECK_STACK_OVF (1);
8576 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8578 dreg = alloc_freg (cfg);
8579 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8580 ins->type = cfg->r4_stack_type;
8582 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8583 ins->type = cfg->r4_stack_type;
8584 ins->dreg = alloc_dreg (cfg, STACK_R8);
8586 MONO_ADD_INS (bblock, ins);
8596 gboolean use_aotconst = FALSE;
8598 #ifdef TARGET_POWERPC
8599 /* FIXME: Clean this up */
8600 if (cfg->compile_aot)
8601 use_aotconst = TRUE;
8604 /* FIXME: we should really allocate this only late in the compilation process */
8605 d = mono_domain_alloc (cfg->domain, sizeof (double));
8607 CHECK_STACK_OVF (1);
8613 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8615 dreg = alloc_freg (cfg);
8616 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8617 ins->type = STACK_R8;
8619 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8620 ins->type = STACK_R8;
8621 ins->dreg = alloc_dreg (cfg, STACK_R8);
8623 MONO_ADD_INS (bblock, ins);
8632 MonoInst *temp, *store;
8634 CHECK_STACK_OVF (1);
8638 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8639 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8641 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8644 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8657 if (sp [0]->type == STACK_R8)
8658 /* we need to pop the value from the x86 FP stack */
8659 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8665 INLINE_FAILURE ("jmp");
8666 GSHAREDVT_FAILURE (*ip);
8669 if (stack_start != sp)
8671 token = read32 (ip + 1);
8672 /* FIXME: check the signature matches */
8673 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8675 if (!cmethod || mono_loader_get_last_error ())
8678 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8679 GENERIC_SHARING_FAILURE (CEE_JMP);
8681 if (mono_security_cas_enabled ())
8682 CHECK_CFG_EXCEPTION;
8684 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8686 if (ARCH_HAVE_OP_TAIL_CALL) {
8687 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8690 /* Handle tail calls similarly to calls */
8691 n = fsig->param_count + fsig->hasthis;
8695 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8696 call->method = cmethod;
8697 call->tail_call = TRUE;
8698 call->signature = mono_method_signature (cmethod);
8699 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8700 call->inst.inst_p0 = cmethod;
8701 for (i = 0; i < n; ++i)
8702 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8704 mono_arch_emit_call (cfg, call);
8705 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8706 MONO_ADD_INS (bblock, (MonoInst*)call);
8708 for (i = 0; i < num_args; ++i)
8709 /* Prevent arguments from being optimized away */
8710 arg_array [i]->flags |= MONO_INST_VOLATILE;
8712 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8713 ins = (MonoInst*)call;
8714 ins->inst_p0 = cmethod;
8715 MONO_ADD_INS (bblock, ins);
8719 start_new_bblock = 1;
8724 MonoMethodSignature *fsig;
8727 token = read32 (ip + 1);
8731 //GSHAREDVT_FAILURE (*ip);
8736 fsig = mini_get_signature (method, token, generic_context);
8738 if (method->dynamic && fsig->pinvoke) {
8742 * This is a call through a function pointer using a pinvoke
8743 * signature. Have to create a wrapper and call that instead.
8744 * FIXME: This is very slow, need to create a wrapper at JIT time
8745 * instead based on the signature.
8747 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8748 EMIT_NEW_PCONST (cfg, args [1], fsig);
8750 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8753 n = fsig->param_count + fsig->hasthis;
8757 //g_assert (!virtual || fsig->hasthis);
8761 inline_costs += 10 * num_calls++;
8764 * Making generic calls out of gsharedvt methods.
8765 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8766 * patching gshared method addresses into a gsharedvt method.
8768 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8770 * We pass the address to the gsharedvt trampoline in the rgctx reg
8772 MonoInst *callee = addr;
8774 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8776 GSHAREDVT_FAILURE (*ip);
8778 addr = emit_get_rgctx_sig (cfg, context_used,
8779 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8780 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8784 /* Prevent inlining of methods with indirect calls */
8785 INLINE_FAILURE ("indirect call");
8787 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8792 * Instead of emitting an indirect call, emit a direct call
8793 * with the contents of the aotconst as the patch info.
8795 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8796 info_type = addr->inst_c1;
8797 info_data = addr->inst_p0;
8799 info_type = addr->inst_right->inst_c1;
8800 info_data = addr->inst_right->inst_left;
8803 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8804 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8809 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8813 /* End of call, INS should contain the result of the call, if any */
8815 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8817 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8820 CHECK_CFG_EXCEPTION;
8824 constrained_class = NULL;
8828 case CEE_CALLVIRT: {
8829 MonoInst *addr = NULL;
8830 MonoMethodSignature *fsig = NULL;
8832 int virtual = *ip == CEE_CALLVIRT;
8833 gboolean pass_imt_from_rgctx = FALSE;
8834 MonoInst *imt_arg = NULL;
8835 MonoInst *keep_this_alive = NULL;
8836 gboolean pass_vtable = FALSE;
8837 gboolean pass_mrgctx = FALSE;
8838 MonoInst *vtable_arg = NULL;
8839 gboolean check_this = FALSE;
8840 gboolean supported_tail_call = FALSE;
8841 gboolean tail_call = FALSE;
8842 gboolean need_seq_point = FALSE;
8843 guint32 call_opcode = *ip;
8844 gboolean emit_widen = TRUE;
8845 gboolean push_res = TRUE;
8846 gboolean skip_ret = FALSE;
8847 gboolean delegate_invoke = FALSE;
8848 gboolean direct_icall = FALSE;
8849 MonoMethod *cil_method;
8852 token = read32 (ip + 1);
8856 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8857 cil_method = cmethod;
8859 if (constrained_class) {
8860 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8861 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8862 g_assert (!cmethod->klass->valuetype);
8863 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg)) {
8864 /* FIXME: gshared type constrained to a primitive type */
8865 GENERIC_SHARING_FAILURE (CEE_CALL);
8870 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8871 if (cfg->verbose_level > 2)
8872 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8873 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8874 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8875 cfg->generic_sharing_context)) {
8876 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8880 if (cfg->verbose_level > 2)
8881 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8883 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8885 * This is needed since get_method_constrained can't find
8886 * the method in klass representing a type var.
8887 * The type var is guaranteed to be a reference type in this
8890 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8891 g_assert (!cmethod->klass->valuetype);
8893 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8899 if (!cmethod || mono_loader_get_last_error ())
8901 if (!dont_verify && !cfg->skip_visibility) {
8902 MonoMethod *target_method = cil_method;
8903 if (method->is_inflated) {
8904 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8906 if (!mono_method_can_access_method (method_definition, target_method) &&
8907 !mono_method_can_access_method (method, cil_method))
8908 METHOD_ACCESS_FAILURE (method, cil_method);
8911 if (mono_security_core_clr_enabled ())
8912 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8914 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8915 /* MS.NET seems to silently convert this to a callvirt */
8920 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8921 * converts to a callvirt.
8923 * tests/bug-515884.il is an example of this behavior
8925 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8926 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8927 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8931 if (!cmethod->klass->inited)
8932 if (!mono_class_init (cmethod->klass))
8933 TYPE_LOAD_ERROR (cmethod->klass);
8935 fsig = mono_method_signature (cmethod);
8938 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8939 mini_class_is_system_array (cmethod->klass)) {
8940 array_rank = cmethod->klass->rank;
8941 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8942 direct_icall = TRUE;
8943 } else if (fsig->pinvoke) {
8944 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8945 check_for_pending_exc, cfg->compile_aot);
8946 fsig = mono_method_signature (wrapper);
8947 } else if (constrained_class) {
8949 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8953 mono_save_token_info (cfg, image, token, cil_method);
8955 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8956 need_seq_point = TRUE;
8958 /* Don't support calls made using type arguments for now */
8960 if (cfg->gsharedvt) {
8961 if (mini_is_gsharedvt_signature (cfg, fsig))
8962 GSHAREDVT_FAILURE (*ip);
8966 if (mono_security_cas_enabled ()) {
8967 if (check_linkdemand (cfg, method, cmethod))
8968 INLINE_FAILURE ("linkdemand");
8969 CHECK_CFG_EXCEPTION;
8972 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8973 g_assert_not_reached ();
8975 n = fsig->param_count + fsig->hasthis;
8977 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8980 if (!cfg->generic_sharing_context)
8981 g_assert (!mono_method_check_context_used (cmethod));
8985 //g_assert (!virtual || fsig->hasthis);
8989 if (constrained_class) {
8990 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8991 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8992 /* The 'Own method' case below */
8993 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8994 /* 'The type parameter is instantiated as a reference type' case below. */
8996 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
8997 CHECK_CFG_EXCEPTION;
9004 * We have the `constrained.' prefix opcode.
9006 if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9008 * The type parameter is instantiated as a valuetype,
9009 * but that type doesn't override the method we're
9010 * calling, so we need to box `this'.
9012 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9013 ins->klass = constrained_class;
9014 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9015 CHECK_CFG_EXCEPTION;
9016 } else if (!constrained_class->valuetype) {
9017 int dreg = alloc_ireg_ref (cfg);
9020 * The type parameter is instantiated as a reference
9021 * type. We have a managed pointer on the stack, so
9022 * we need to dereference it here.
9024 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9025 ins->type = STACK_OBJ;
9028 if (cmethod->klass->valuetype) {
9031 /* Interface method */
9034 mono_class_setup_vtable (constrained_class);
9035 CHECK_TYPELOAD (constrained_class);
9036 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9038 TYPE_LOAD_ERROR (constrained_class);
9039 slot = mono_method_get_vtable_slot (cmethod);
9041 TYPE_LOAD_ERROR (cmethod->klass);
9042 cmethod = constrained_class->vtable [ioffset + slot];
9044 if (cmethod->klass == mono_defaults.enum_class) {
9045 /* Enum implements some interfaces, so treat this as the first case */
9046 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9047 ins->klass = constrained_class;
9048 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9049 CHECK_CFG_EXCEPTION;
9054 constrained_class = NULL;
9057 if (check_call_signature (cfg, fsig, sp))
9060 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
9061 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9062 delegate_invoke = TRUE;
9065 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9067 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9068 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9076 * If the callee is a shared method, then its static cctor
9077 * might not get called after the call was patched.
9079 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9080 emit_generic_class_init (cfg, cmethod->klass);
9081 CHECK_TYPELOAD (cmethod->klass);
9084 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9086 if (cfg->generic_sharing_context) {
9087 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9089 context_used = mini_method_check_context_used (cfg, cmethod);
9091 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9092 /* Generic method interface
9093 calls are resolved via a
9094 helper function and don't
9096 if (!cmethod_context || !cmethod_context->method_inst)
9097 pass_imt_from_rgctx = TRUE;
9101 * If a shared method calls another
9102 * shared method then the caller must
9103 * have a generic sharing context
9104 * because the magic trampoline
9105 * requires it. FIXME: We shouldn't
9106 * have to force the vtable/mrgctx
9107 * variable here. Instead there
9108 * should be a flag in the cfg to
9109 * request a generic sharing context.
9112 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9113 mono_get_vtable_var (cfg);
9118 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9120 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9122 CHECK_TYPELOAD (cmethod->klass);
9123 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9128 g_assert (!vtable_arg);
9130 if (!cfg->compile_aot) {
9132 * emit_get_rgctx_method () calls mono_class_vtable () so check
9133 * for type load errors before.
9135 mono_class_setup_vtable (cmethod->klass);
9136 CHECK_TYPELOAD (cmethod->klass);
9139 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9141 /* !marshalbyref is needed to properly handle generic methods + remoting */
9142 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9143 MONO_METHOD_IS_FINAL (cmethod)) &&
9144 !mono_class_is_marshalbyref (cmethod->klass)) {
9151 if (pass_imt_from_rgctx) {
9152 g_assert (!pass_vtable);
9154 imt_arg = emit_get_rgctx_method (cfg, context_used,
9155 cmethod, MONO_RGCTX_INFO_METHOD);
9159 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9161 /* Calling virtual generic methods */
9162 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9163 !(MONO_METHOD_IS_FINAL (cmethod) &&
9164 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9165 fsig->generic_param_count &&
9166 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9167 MonoInst *this_temp, *this_arg_temp, *store;
9168 MonoInst *iargs [4];
9169 gboolean use_imt = FALSE;
9171 g_assert (fsig->is_inflated);
9173 /* Prevent inlining of methods that contain indirect calls */
9174 INLINE_FAILURE ("virtual generic call");
9176 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9177 GSHAREDVT_FAILURE (*ip);
9179 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9180 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9185 g_assert (!imt_arg);
9187 g_assert (cmethod->is_inflated);
9188 imt_arg = emit_get_rgctx_method (cfg, context_used,
9189 cmethod, MONO_RGCTX_INFO_METHOD);
9190 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9192 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9193 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9194 MONO_ADD_INS (bblock, store);
9196 /* FIXME: This should be a managed pointer */
9197 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9199 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9200 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9201 cmethod, MONO_RGCTX_INFO_METHOD);
9202 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9203 addr = mono_emit_jit_icall (cfg,
9204 mono_helper_compile_generic_method, iargs);
9206 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9208 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9215 * Implement a workaround for the inherent races involved in locking:
9221 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9222 * try block, the Exit () won't be executed, see:
9223 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9224 * To work around this, we extend such try blocks to include the last x bytes
9225 * of the Monitor.Enter () call.
9227 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9228 MonoBasicBlock *tbb;
9230 GET_BBLOCK (cfg, tbb, ip + 5);
9232 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9233 * from Monitor.Enter like ArgumentNullException.
9235 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9236 /* Mark this bblock as needing to be extended */
9237 tbb->extend_try_block = TRUE;
9241 /* Conversion to a JIT intrinsic */
9242 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9244 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9245 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9252 if ((cfg->opt & MONO_OPT_INLINE) &&
9253 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9254 mono_method_check_inlining (cfg, cmethod)) {
9256 gboolean always = FALSE;
9258 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9259 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9260 /* Prevent inlining of methods that call wrappers */
9261 INLINE_FAILURE ("wrapper call");
9262 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9266 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9268 cfg->real_offset += 5;
9270 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9271 /* *sp is already set by inline_method */
9276 inline_costs += costs;
9282 /* Tail recursion elimination */
9283 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9284 gboolean has_vtargs = FALSE;
9287 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9288 INLINE_FAILURE ("tail call");
9290 /* keep it simple */
9291 for (i = fsig->param_count - 1; i >= 0; i--) {
9292 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9297 for (i = 0; i < n; ++i)
9298 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9299 MONO_INST_NEW (cfg, ins, OP_BR);
9300 MONO_ADD_INS (bblock, ins);
9301 tblock = start_bblock->out_bb [0];
9302 link_bblock (cfg, bblock, tblock);
9303 ins->inst_target_bb = tblock;
9304 start_new_bblock = 1;
9306 /* skip the CEE_RET, too */
9307 if (ip_in_bb (cfg, bblock, ip + 5))
9314 inline_costs += 10 * num_calls++;
9317 * Making generic calls out of gsharedvt methods.
9318 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9319 * patching gshared method addresses into a gsharedvt method.
9321 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9322 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9323 MonoRgctxInfoType info_type;
9326 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9327 //GSHAREDVT_FAILURE (*ip);
9328 // disable for possible remoting calls
9329 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9330 GSHAREDVT_FAILURE (*ip);
9331 if (fsig->generic_param_count) {
9332 /* virtual generic call */
9333 g_assert (mono_use_imt);
9334 g_assert (!imt_arg);
9335 /* Same as the virtual generic case above */
9336 imt_arg = emit_get_rgctx_method (cfg, context_used,
9337 cmethod, MONO_RGCTX_INFO_METHOD);
9338 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9340 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9341 /* This can happen when we call a fully instantiated iface method */
9342 imt_arg = emit_get_rgctx_method (cfg, context_used,
9343 cmethod, MONO_RGCTX_INFO_METHOD);
9348 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9349 keep_this_alive = sp [0];
9351 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9352 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9354 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9355 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9357 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9361 /* Generic sharing */
9364 * Use this if the callee is gsharedvt sharable too, since
9365 * at runtime we might find an instantiation so the call cannot
9366 * be patched (the 'no_patch' code path in mini-trampolines.c).
9368 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9369 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9370 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9371 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9372 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9373 INLINE_FAILURE ("gshared");
9375 g_assert (cfg->generic_sharing_context && cmethod);
9379 * We are compiling a call to a
9380 * generic method from shared code,
9381 * which means that we have to look up
9382 * the method in the rgctx and do an
9386 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9388 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9389 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9393 /* Direct calls to icalls */
9395 MonoMethod *wrapper;
9398 /* Inline the wrapper */
9399 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9401 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9402 g_assert (costs > 0);
9403 cfg->real_offset += 5;
9405 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9406 /* *sp is already set by inline_method */
9411 inline_costs += costs;
9420 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9421 MonoInst *val = sp [fsig->param_count];
9423 if (val->type == STACK_OBJ) {
9424 MonoInst *iargs [2];
9429 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9432 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9433 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9434 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9435 emit_write_barrier (cfg, addr, val);
9436 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9437 GSHAREDVT_FAILURE (*ip);
9438 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9439 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9441 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9442 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9443 if (!cmethod->klass->element_class->valuetype && !readonly)
9444 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9445 CHECK_TYPELOAD (cmethod->klass);
9448 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9451 g_assert_not_reached ();
9458 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9462 /* Tail prefix / tail call optimization */
9464 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9465 /* FIXME: runtime generic context pointer for jumps? */
9466 /* FIXME: handle this for generic sharing eventually */
9467 if ((ins_flag & MONO_INST_TAILCALL) &&
9468 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9469 supported_tail_call = TRUE;
9471 if (supported_tail_call) {
9474 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9475 INLINE_FAILURE ("tail call");
9477 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9479 if (ARCH_HAVE_OP_TAIL_CALL) {
9480 /* Handle tail calls similarly to normal calls */
9483 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9485 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9486 call->tail_call = TRUE;
9487 call->method = cmethod;
9488 call->signature = mono_method_signature (cmethod);
9491 * We implement tail calls by storing the actual arguments into the
9492 * argument variables, then emitting a CEE_JMP.
9494 for (i = 0; i < n; ++i) {
9495 /* Prevent argument from being register allocated */
9496 arg_array [i]->flags |= MONO_INST_VOLATILE;
9497 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9499 ins = (MonoInst*)call;
9500 ins->inst_p0 = cmethod;
9501 ins->inst_p1 = arg_array [0];
9502 MONO_ADD_INS (bblock, ins);
9503 link_bblock (cfg, bblock, end_bblock);
9504 start_new_bblock = 1;
9506 // FIXME: Eliminate unreachable epilogs
9509 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9510 * only reachable from this call.
9512 GET_BBLOCK (cfg, tblock, ip + 5);
9513 if (tblock == bblock || tblock->in_count == 0)
9522 * Synchronized wrappers.
9523 * It's hard to determine where to replace a method with its synchronized
9524 * wrapper without causing an infinite recursion. The current solution is
9525 * to add the synchronized wrapper in the trampolines, and to
9526 * change the called method to a dummy wrapper, and resolve that wrapper
9527 * to the real method in mono_jit_compile_method ().
9529 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9530 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9531 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9532 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9536 INLINE_FAILURE ("call");
9537 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9538 imt_arg, vtable_arg);
9541 link_bblock (cfg, bblock, end_bblock);
9542 start_new_bblock = 1;
9544 // FIXME: Eliminate unreachable epilogs
9547 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9548 * only reachable from this call.
9550 GET_BBLOCK (cfg, tblock, ip + 5);
9551 if (tblock == bblock || tblock->in_count == 0)
9558 /* End of call, INS should contain the result of the call, if any */
9560 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9563 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9568 if (keep_this_alive) {
9569 MonoInst *dummy_use;
9571 /* See mono_emit_method_call_full () */
9572 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9575 CHECK_CFG_EXCEPTION;
9579 g_assert (*ip == CEE_RET);
9583 constrained_class = NULL;
9585 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9589 if (cfg->method != method) {
9590 /* return from inlined method */
9592 * If in_count == 0, that means the ret is unreachable due to
9593 * being preceded by a throw. In that case, inline_method () will
9594 * handle setting the return value
9595 * (test case: test_0_inline_throw ()).
9597 if (return_var && cfg->cbb->in_count) {
9598 MonoType *ret_type = mono_method_signature (method)->ret;
9604 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9607 //g_assert (returnvar != -1);
9608 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9609 cfg->ret_var_set = TRUE;
9612 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9614 if (cfg->lmf_var && cfg->cbb->in_count)
9618 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9620 if (seq_points && !sym_seq_points) {
9622 * Place a seq point here too even though the IL stack is not
9623 * empty, so a step over on
9626 * will work correctly.
9628 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9629 MONO_ADD_INS (cfg->cbb, ins);
9632 g_assert (!return_var);
9636 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9639 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9642 if (!cfg->vret_addr) {
9645 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9647 EMIT_NEW_RETLOADA (cfg, ret_addr);
9649 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9650 ins->klass = mono_class_from_mono_type (ret_type);
9653 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9654 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9655 MonoInst *iargs [1];
9659 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9660 mono_arch_emit_setret (cfg, method, conv);
9662 mono_arch_emit_setret (cfg, method, *sp);
9665 mono_arch_emit_setret (cfg, method, *sp);
9670 if (sp != stack_start)
9672 MONO_INST_NEW (cfg, ins, OP_BR);
9674 ins->inst_target_bb = end_bblock;
9675 MONO_ADD_INS (bblock, ins);
9676 link_bblock (cfg, bblock, end_bblock);
9677 start_new_bblock = 1;
9681 MONO_INST_NEW (cfg, ins, OP_BR);
9683 target = ip + 1 + (signed char)(*ip);
9685 GET_BBLOCK (cfg, tblock, target);
9686 link_bblock (cfg, bblock, tblock);
9687 ins->inst_target_bb = tblock;
9688 if (sp != stack_start) {
9689 handle_stack_args (cfg, stack_start, sp - stack_start);
9691 CHECK_UNVERIFIABLE (cfg);
9693 MONO_ADD_INS (bblock, ins);
9694 start_new_bblock = 1;
9695 inline_costs += BRANCH_COST;
9709 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9711 target = ip + 1 + *(signed char*)ip;
9717 inline_costs += BRANCH_COST;
9721 MONO_INST_NEW (cfg, ins, OP_BR);
9724 target = ip + 4 + (gint32)read32(ip);
9726 GET_BBLOCK (cfg, tblock, target);
9727 link_bblock (cfg, bblock, tblock);
9728 ins->inst_target_bb = tblock;
9729 if (sp != stack_start) {
9730 handle_stack_args (cfg, stack_start, sp - stack_start);
9732 CHECK_UNVERIFIABLE (cfg);
9735 MONO_ADD_INS (bblock, ins);
9737 start_new_bblock = 1;
9738 inline_costs += BRANCH_COST;
9745 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9746 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9747 guint32 opsize = is_short ? 1 : 4;
9749 CHECK_OPSIZE (opsize);
9751 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9754 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9759 GET_BBLOCK (cfg, tblock, target);
9760 link_bblock (cfg, bblock, tblock);
9761 GET_BBLOCK (cfg, tblock, ip);
9762 link_bblock (cfg, bblock, tblock);
9764 if (sp != stack_start) {
9765 handle_stack_args (cfg, stack_start, sp - stack_start);
9766 CHECK_UNVERIFIABLE (cfg);
9769 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9770 cmp->sreg1 = sp [0]->dreg;
9771 type_from_op (cfg, cmp, sp [0], NULL);
9774 #if SIZEOF_REGISTER == 4
9775 if (cmp->opcode == OP_LCOMPARE_IMM) {
9776 /* Convert it to OP_LCOMPARE */
9777 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9778 ins->type = STACK_I8;
9779 ins->dreg = alloc_dreg (cfg, STACK_I8);
9781 MONO_ADD_INS (bblock, ins);
9782 cmp->opcode = OP_LCOMPARE;
9783 cmp->sreg2 = ins->dreg;
9786 MONO_ADD_INS (bblock, cmp);
9788 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9789 type_from_op (cfg, ins, sp [0], NULL);
9790 MONO_ADD_INS (bblock, ins);
9791 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9792 GET_BBLOCK (cfg, tblock, target);
9793 ins->inst_true_bb = tblock;
9794 GET_BBLOCK (cfg, tblock, ip);
9795 ins->inst_false_bb = tblock;
9796 start_new_bblock = 2;
9799 inline_costs += BRANCH_COST;
9814 MONO_INST_NEW (cfg, ins, *ip);
9816 target = ip + 4 + (gint32)read32(ip);
9822 inline_costs += BRANCH_COST;
9826 MonoBasicBlock **targets;
9827 MonoBasicBlock *default_bblock;
9828 MonoJumpInfoBBTable *table;
9829 int offset_reg = alloc_preg (cfg);
9830 int target_reg = alloc_preg (cfg);
9831 int table_reg = alloc_preg (cfg);
9832 int sum_reg = alloc_preg (cfg);
9833 gboolean use_op_switch;
9837 n = read32 (ip + 1);
9840 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9844 CHECK_OPSIZE (n * sizeof (guint32));
9845 target = ip + n * sizeof (guint32);
9847 GET_BBLOCK (cfg, default_bblock, target);
9848 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9850 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9851 for (i = 0; i < n; ++i) {
9852 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9853 targets [i] = tblock;
9854 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9858 if (sp != stack_start) {
9860 * Link the current bb with the targets as well, so handle_stack_args
9861 * will set their in_stack correctly.
9863 link_bblock (cfg, bblock, default_bblock);
9864 for (i = 0; i < n; ++i)
9865 link_bblock (cfg, bblock, targets [i]);
9867 handle_stack_args (cfg, stack_start, sp - stack_start);
9869 CHECK_UNVERIFIABLE (cfg);
9872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9876 for (i = 0; i < n; ++i)
9877 link_bblock (cfg, bblock, targets [i]);
9879 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9880 table->table = targets;
9881 table->table_size = n;
9883 use_op_switch = FALSE;
9885 /* ARM implements SWITCH statements differently */
9886 /* FIXME: Make it use the generic implementation */
9887 if (!cfg->compile_aot)
9888 use_op_switch = TRUE;
9891 if (COMPILE_LLVM (cfg))
9892 use_op_switch = TRUE;
9894 cfg->cbb->has_jump_table = 1;
9896 if (use_op_switch) {
9897 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9898 ins->sreg1 = src1->dreg;
9899 ins->inst_p0 = table;
9900 ins->inst_many_bb = targets;
9901 ins->klass = GUINT_TO_POINTER (n);
9902 MONO_ADD_INS (cfg->cbb, ins);
9904 if (sizeof (gpointer) == 8)
9905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9909 #if SIZEOF_REGISTER == 8
9910 /* The upper word might not be zero, and we add it to a 64 bit address later */
9911 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9914 if (cfg->compile_aot) {
9915 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9917 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9918 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9919 ins->inst_p0 = table;
9920 ins->dreg = table_reg;
9921 MONO_ADD_INS (cfg->cbb, ins);
9924 /* FIXME: Use load_memindex */
9925 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9926 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9927 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9929 start_new_bblock = 1;
9930 inline_costs += (BRANCH_COST * 2);
9950 dreg = alloc_freg (cfg);
9953 dreg = alloc_lreg (cfg);
9956 dreg = alloc_ireg_ref (cfg);
9959 dreg = alloc_preg (cfg);
9962 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9963 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9964 if (*ip == CEE_LDIND_R4)
9965 ins->type = cfg->r4_stack_type;
9966 ins->flags |= ins_flag;
9967 MONO_ADD_INS (bblock, ins);
9969 if (ins_flag & MONO_INST_VOLATILE) {
9970 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9971 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9987 if (ins_flag & MONO_INST_VOLATILE) {
9988 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9989 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9992 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9993 ins->flags |= ins_flag;
9996 MONO_ADD_INS (bblock, ins);
9998 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9999 emit_write_barrier (cfg, sp [0], sp [1]);
10008 MONO_INST_NEW (cfg, ins, (*ip));
10010 ins->sreg1 = sp [0]->dreg;
10011 ins->sreg2 = sp [1]->dreg;
10012 type_from_op (cfg, ins, sp [0], sp [1]);
10014 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10016 /* Use the immediate opcodes if possible */
10017 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10018 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10019 if (imm_opcode != -1) {
10020 ins->opcode = imm_opcode;
10021 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10024 NULLIFY_INS (sp [1]);
10028 MONO_ADD_INS ((cfg)->cbb, (ins));
10030 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10047 MONO_INST_NEW (cfg, ins, (*ip));
10049 ins->sreg1 = sp [0]->dreg;
10050 ins->sreg2 = sp [1]->dreg;
10051 type_from_op (cfg, ins, sp [0], sp [1]);
10053 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10054 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10056 /* FIXME: Pass opcode to is_inst_imm */
10058 /* Use the immediate opcodes if possible */
10059 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10062 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10063 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10064 /* Keep emulated opcodes which are optimized away later */
10065 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10066 imm_opcode = mono_op_to_op_imm (ins->opcode);
10069 if (imm_opcode != -1) {
10070 ins->opcode = imm_opcode;
10071 if (sp [1]->opcode == OP_I8CONST) {
10072 #if SIZEOF_REGISTER == 8
10073 ins->inst_imm = sp [1]->inst_l;
10075 ins->inst_ls_word = sp [1]->inst_ls_word;
10076 ins->inst_ms_word = sp [1]->inst_ms_word;
10080 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10083 /* Might be followed by an instruction added by add_widen_op */
10084 if (sp [1]->next == NULL)
10085 NULLIFY_INS (sp [1]);
10088 MONO_ADD_INS ((cfg)->cbb, (ins));
10090 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10103 case CEE_CONV_OVF_I8:
10104 case CEE_CONV_OVF_U8:
10105 case CEE_CONV_R_UN:
10108 /* Special case this earlier so we have long constants in the IR */
10109 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10110 int data = sp [-1]->inst_c0;
10111 sp [-1]->opcode = OP_I8CONST;
10112 sp [-1]->type = STACK_I8;
10113 #if SIZEOF_REGISTER == 8
10114 if ((*ip) == CEE_CONV_U8)
10115 sp [-1]->inst_c0 = (guint32)data;
10117 sp [-1]->inst_c0 = data;
10119 sp [-1]->inst_ls_word = data;
10120 if ((*ip) == CEE_CONV_U8)
10121 sp [-1]->inst_ms_word = 0;
10123 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10125 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10132 case CEE_CONV_OVF_I4:
10133 case CEE_CONV_OVF_I1:
10134 case CEE_CONV_OVF_I2:
10135 case CEE_CONV_OVF_I:
10136 case CEE_CONV_OVF_U:
10139 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10140 ADD_UNOP (CEE_CONV_OVF_I8);
10147 case CEE_CONV_OVF_U1:
10148 case CEE_CONV_OVF_U2:
10149 case CEE_CONV_OVF_U4:
10152 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10153 ADD_UNOP (CEE_CONV_OVF_U8);
10160 case CEE_CONV_OVF_I1_UN:
10161 case CEE_CONV_OVF_I2_UN:
10162 case CEE_CONV_OVF_I4_UN:
10163 case CEE_CONV_OVF_I8_UN:
10164 case CEE_CONV_OVF_U1_UN:
10165 case CEE_CONV_OVF_U2_UN:
10166 case CEE_CONV_OVF_U4_UN:
10167 case CEE_CONV_OVF_U8_UN:
10168 case CEE_CONV_OVF_I_UN:
10169 case CEE_CONV_OVF_U_UN:
10176 CHECK_CFG_EXCEPTION;
10180 case CEE_ADD_OVF_UN:
10182 case CEE_MUL_OVF_UN:
10184 case CEE_SUB_OVF_UN:
10190 GSHAREDVT_FAILURE (*ip);
10193 token = read32 (ip + 1);
10194 klass = mini_get_class (method, token, generic_context);
10195 CHECK_TYPELOAD (klass);
10197 if (generic_class_is_reference_type (cfg, klass)) {
10198 MonoInst *store, *load;
10199 int dreg = alloc_ireg_ref (cfg);
10201 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10202 load->flags |= ins_flag;
10203 MONO_ADD_INS (cfg->cbb, load);
10205 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10206 store->flags |= ins_flag;
10207 MONO_ADD_INS (cfg->cbb, store);
10209 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10210 emit_write_barrier (cfg, sp [0], sp [1]);
10212 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10218 int loc_index = -1;
10224 token = read32 (ip + 1);
10225 klass = mini_get_class (method, token, generic_context);
10226 CHECK_TYPELOAD (klass);
10228 /* Optimize the common ldobj+stloc combination */
10231 loc_index = ip [6];
10238 loc_index = ip [5] - CEE_STLOC_0;
10245 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10246 CHECK_LOCAL (loc_index);
10248 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10249 ins->dreg = cfg->locals [loc_index]->dreg;
10250 ins->flags |= ins_flag;
10253 if (ins_flag & MONO_INST_VOLATILE) {
10254 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10255 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10261 /* Optimize the ldobj+stobj combination */
10262 /* The reference case ends up being a load+store anyway */
10263 /* Skip this if the operation is volatile. */
10264 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10269 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10276 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10277 ins->flags |= ins_flag;
10280 if (ins_flag & MONO_INST_VOLATILE) {
10281 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10282 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10291 CHECK_STACK_OVF (1);
10293 n = read32 (ip + 1);
10295 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10296 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10297 ins->type = STACK_OBJ;
10300 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10301 MonoInst *iargs [1];
10302 char *str = mono_method_get_wrapper_data (method, n);
10304 if (cfg->compile_aot)
10305 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10307 EMIT_NEW_PCONST (cfg, iargs [0], str);
10308 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10310 if (cfg->opt & MONO_OPT_SHARED) {
10311 MonoInst *iargs [3];
10313 if (cfg->compile_aot) {
10314 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10316 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10317 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10318 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10319 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10320 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10322 if (bblock->out_of_line) {
10323 MonoInst *iargs [2];
10325 if (image == mono_defaults.corlib) {
10327 * Avoid relocations in AOT and save some space by using a
10328 * version of helper_ldstr specialized to mscorlib.
10330 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10331 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10333 /* Avoid creating the string object */
10334 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10335 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10336 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10340 if (cfg->compile_aot) {
10341 NEW_LDSTRCONST (cfg, ins, image, n);
10343 MONO_ADD_INS (bblock, ins);
10346 NEW_PCONST (cfg, ins, NULL);
10347 ins->type = STACK_OBJ;
10348 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10350 OUT_OF_MEMORY_FAILURE;
10353 MONO_ADD_INS (bblock, ins);
10362 MonoInst *iargs [2];
10363 MonoMethodSignature *fsig;
10366 MonoInst *vtable_arg = NULL;
10369 token = read32 (ip + 1);
10370 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10371 if (!cmethod || mono_loader_get_last_error ())
10373 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10376 mono_save_token_info (cfg, image, token, cmethod);
10378 if (!mono_class_init (cmethod->klass))
10379 TYPE_LOAD_ERROR (cmethod->klass);
10381 context_used = mini_method_check_context_used (cfg, cmethod);
10383 if (mono_security_cas_enabled ()) {
10384 if (check_linkdemand (cfg, method, cmethod))
10385 INLINE_FAILURE ("linkdemand");
10386 CHECK_CFG_EXCEPTION;
10387 } else if (mono_security_core_clr_enabled ()) {
10388 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10391 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10392 emit_generic_class_init (cfg, cmethod->klass);
10393 CHECK_TYPELOAD (cmethod->klass);
10397 if (cfg->gsharedvt) {
10398 if (mini_is_gsharedvt_variable_signature (sig))
10399 GSHAREDVT_FAILURE (*ip);
10403 n = fsig->param_count;
10407 * Generate smaller code for the common newobj <exception> instruction in
10408 * argument checking code.
10410 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10411 is_exception_class (cmethod->klass) && n <= 2 &&
10412 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10413 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10414 MonoInst *iargs [3];
10418 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10421 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10424 iargs [1] = sp [0];
10425 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10428 iargs [1] = sp [0];
10429 iargs [2] = sp [1];
10430 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10433 g_assert_not_reached ();
10441 /* move the args to allow room for 'this' in the first position */
10447 /* check_call_signature () requires sp[0] to be set */
10448 this_ins.type = STACK_OBJ;
10449 sp [0] = &this_ins;
10450 if (check_call_signature (cfg, fsig, sp))
10455 if (mini_class_is_system_array (cmethod->klass)) {
10456 *sp = emit_get_rgctx_method (cfg, context_used,
10457 cmethod, MONO_RGCTX_INFO_METHOD);
10459 /* Avoid varargs in the common case */
10460 if (fsig->param_count == 1)
10461 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10462 else if (fsig->param_count == 2)
10463 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10464 else if (fsig->param_count == 3)
10465 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10466 else if (fsig->param_count == 4)
10467 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10469 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10470 } else if (cmethod->string_ctor) {
10471 g_assert (!context_used);
10472 g_assert (!vtable_arg);
10473 /* we simply pass a null pointer */
10474 EMIT_NEW_PCONST (cfg, *sp, NULL);
10475 /* now call the string ctor */
10476 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10478 if (cmethod->klass->valuetype) {
10479 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10480 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10481 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10486 * The code generated by mini_emit_virtual_call () expects
10487 * iargs [0] to be a boxed instance, but luckily the vcall
10488 * will be transformed into a normal call there.
10490 } else if (context_used) {
10491 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10494 MonoVTable *vtable = NULL;
10496 if (!cfg->compile_aot)
10497 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10498 CHECK_TYPELOAD (cmethod->klass);
10501 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10502 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10503 * As a workaround, we call class cctors before allocating objects.
10505 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10506 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10507 if (cfg->verbose_level > 2)
10508 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10509 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10512 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10515 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10518 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10520 /* Now call the actual ctor */
10521 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10522 CHECK_CFG_EXCEPTION;
10525 if (alloc == NULL) {
10527 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10528 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10536 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10537 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10540 case CEE_CASTCLASS:
10544 token = read32 (ip + 1);
10545 klass = mini_get_class (method, token, generic_context);
10546 CHECK_TYPELOAD (klass);
10547 if (sp [0]->type != STACK_OBJ)
10550 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10551 CHECK_CFG_EXCEPTION;
10560 token = read32 (ip + 1);
10561 klass = mini_get_class (method, token, generic_context);
10562 CHECK_TYPELOAD (klass);
10563 if (sp [0]->type != STACK_OBJ)
10566 context_used = mini_class_check_context_used (cfg, klass);
10568 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10569 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10570 MonoInst *args [3];
10577 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10580 if (cfg->compile_aot) {
10581 idx = get_castclass_cache_idx (cfg);
10582 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10584 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10587 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10590 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10591 MonoMethod *mono_isinst;
10592 MonoInst *iargs [1];
10595 mono_isinst = mono_marshal_get_isinst (klass);
10596 iargs [0] = sp [0];
10598 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10599 iargs, ip, cfg->real_offset, TRUE, &bblock);
10600 CHECK_CFG_EXCEPTION;
10601 g_assert (costs > 0);
10604 cfg->real_offset += 5;
10608 inline_costs += costs;
10611 ins = handle_isinst (cfg, klass, *sp, context_used);
10612 CHECK_CFG_EXCEPTION;
10619 case CEE_UNBOX_ANY: {
10620 MonoInst *res, *addr;
10625 token = read32 (ip + 1);
10626 klass = mini_get_class (method, token, generic_context);
10627 CHECK_TYPELOAD (klass);
10629 mono_save_token_info (cfg, image, token, klass);
10631 context_used = mini_class_check_context_used (cfg, klass);
10633 if (mini_is_gsharedvt_klass (cfg, klass)) {
10634 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10636 } else if (generic_class_is_reference_type (cfg, klass)) {
10637 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10638 CHECK_CFG_EXCEPTION;
10639 } else if (mono_class_is_nullable (klass)) {
10640 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10642 addr = handle_unbox (cfg, klass, sp, context_used);
10644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10655 MonoClass *enum_class;
10656 MonoMethod *has_flag;
10662 token = read32 (ip + 1);
10663 klass = mini_get_class (method, token, generic_context);
10664 CHECK_TYPELOAD (klass);
10666 mono_save_token_info (cfg, image, token, klass);
10668 context_used = mini_class_check_context_used (cfg, klass);
10670 if (generic_class_is_reference_type (cfg, klass)) {
10676 if (klass == mono_defaults.void_class)
10678 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10680 /* frequent check in generic code: box (struct), brtrue */
10685 * <push int/long ptr>
10688 * constrained. MyFlags
10689 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10691 * If we find this sequence and the operand types on box and constrained
10692 * are equal, we can emit a specialized instruction sequence instead of
10693 * the very slow HasFlag () call.
10695 if ((cfg->opt & MONO_OPT_INTRINS) &&
10696 /* Cheap checks first. */
10697 ip + 5 + 6 + 5 < end &&
10698 ip [5] == CEE_PREFIX1 &&
10699 ip [6] == CEE_CONSTRAINED_ &&
10700 ip [11] == CEE_CALLVIRT &&
10701 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10702 mono_class_is_enum (klass) &&
10703 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10704 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10705 has_flag->klass == mono_defaults.enum_class &&
10706 !strcmp (has_flag->name, "HasFlag") &&
10707 has_flag->signature->hasthis &&
10708 has_flag->signature->param_count == 1) {
10709 CHECK_TYPELOAD (enum_class);
10711 if (enum_class == klass) {
10712 MonoInst *enum_this, *enum_flag;
10717 enum_this = sp [0];
10718 enum_flag = sp [1];
10720 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10725 // FIXME: LLVM can't handle the inconsistent bb linking
10726 if (!mono_class_is_nullable (klass) &&
10727 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10728 (ip [5] == CEE_BRTRUE ||
10729 ip [5] == CEE_BRTRUE_S ||
10730 ip [5] == CEE_BRFALSE ||
10731 ip [5] == CEE_BRFALSE_S)) {
10732 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10734 MonoBasicBlock *true_bb, *false_bb;
10738 if (cfg->verbose_level > 3) {
10739 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10740 printf ("<box+brtrue opt>\n");
10745 case CEE_BRFALSE_S:
10748 target = ip + 1 + (signed char)(*ip);
10755 target = ip + 4 + (gint)(read32 (ip));
10759 g_assert_not_reached ();
10763 * We need to link both bblocks, since it is needed for handling stack
10764 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10765 * Branching to only one of them would lead to inconsistencies, so
10766 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10768 GET_BBLOCK (cfg, true_bb, target);
10769 GET_BBLOCK (cfg, false_bb, ip);
10771 mono_link_bblock (cfg, cfg->cbb, true_bb);
10772 mono_link_bblock (cfg, cfg->cbb, false_bb);
10774 if (sp != stack_start) {
10775 handle_stack_args (cfg, stack_start, sp - stack_start);
10777 CHECK_UNVERIFIABLE (cfg);
10780 if (COMPILE_LLVM (cfg)) {
10781 dreg = alloc_ireg (cfg);
10782 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10783 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10785 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10787 /* The JIT can't eliminate the iconst+compare */
10788 MONO_INST_NEW (cfg, ins, OP_BR);
10789 ins->inst_target_bb = is_true ? true_bb : false_bb;
10790 MONO_ADD_INS (cfg->cbb, ins);
10793 start_new_bblock = 1;
10797 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10799 CHECK_CFG_EXCEPTION;
10808 token = read32 (ip + 1);
10809 klass = mini_get_class (method, token, generic_context);
10810 CHECK_TYPELOAD (klass);
10812 mono_save_token_info (cfg, image, token, klass);
10814 context_used = mini_class_check_context_used (cfg, klass);
10816 if (mono_class_is_nullable (klass)) {
10819 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10820 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10824 ins = handle_unbox (cfg, klass, sp, context_used);
10837 MonoClassField *field;
10838 #ifndef DISABLE_REMOTING
10842 gboolean is_instance;
10844 gpointer addr = NULL;
10845 gboolean is_special_static;
10847 MonoInst *store_val = NULL;
10848 MonoInst *thread_ins;
10851 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10853 if (op == CEE_STFLD) {
10856 store_val = sp [1];
10861 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10863 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10866 if (op == CEE_STSFLD) {
10869 store_val = sp [0];
10874 token = read32 (ip + 1);
10875 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10876 field = mono_method_get_wrapper_data (method, token);
10877 klass = field->parent;
10880 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10883 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10884 FIELD_ACCESS_FAILURE (method, field);
10885 mono_class_init (klass);
10887 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10890 /* if the class is Critical then transparent code cannot access it's fields */
10891 if (!is_instance && mono_security_core_clr_enabled ())
10892 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10894 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10895 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10896 if (mono_security_core_clr_enabled ())
10897 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10901 * LDFLD etc. is usable on static fields as well, so convert those cases to
10904 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10916 g_assert_not_reached ();
10918 is_instance = FALSE;
10921 context_used = mini_class_check_context_used (cfg, klass);
10923 /* INSTANCE CASE */
10925 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10926 if (op == CEE_STFLD) {
10927 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10929 #ifndef DISABLE_REMOTING
10930 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10931 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10932 MonoInst *iargs [5];
10934 GSHAREDVT_FAILURE (op);
10936 iargs [0] = sp [0];
10937 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10938 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10939 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10941 iargs [4] = sp [1];
10943 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10944 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10945 iargs, ip, cfg->real_offset, TRUE, &bblock);
10946 CHECK_CFG_EXCEPTION;
10947 g_assert (costs > 0);
10949 cfg->real_offset += 5;
10951 inline_costs += costs;
10953 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10960 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10962 if (mini_is_gsharedvt_klass (cfg, klass)) {
10963 MonoInst *offset_ins;
10965 context_used = mini_class_check_context_used (cfg, klass);
10967 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10968 dreg = alloc_ireg_mp (cfg);
10969 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10970 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10971 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10973 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10975 if (sp [0]->opcode != OP_LDADDR)
10976 store->flags |= MONO_INST_FAULT;
10978 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10979 /* insert call to write barrier */
10983 dreg = alloc_ireg_mp (cfg);
10984 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10985 emit_write_barrier (cfg, ptr, sp [1]);
10988 store->flags |= ins_flag;
10995 #ifndef DISABLE_REMOTING
10996 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10997 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10998 MonoInst *iargs [4];
11000 GSHAREDVT_FAILURE (op);
11002 iargs [0] = sp [0];
11003 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11004 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11005 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11006 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11007 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11008 iargs, ip, cfg->real_offset, TRUE, &bblock);
11009 CHECK_CFG_EXCEPTION;
11010 g_assert (costs > 0);
11012 cfg->real_offset += 5;
11016 inline_costs += costs;
11018 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11024 if (sp [0]->type == STACK_VTYPE) {
11027 /* Have to compute the address of the variable */
11029 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11031 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11033 g_assert (var->klass == klass);
11035 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11039 if (op == CEE_LDFLDA) {
11040 if (is_magic_tls_access (field)) {
11041 GSHAREDVT_FAILURE (*ip);
11043 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
11045 if (sp [0]->type == STACK_OBJ) {
11046 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11047 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11050 dreg = alloc_ireg_mp (cfg);
11052 if (mini_is_gsharedvt_klass (cfg, klass)) {
11053 MonoInst *offset_ins;
11055 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11056 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11058 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11060 ins->klass = mono_class_from_mono_type (field->type);
11061 ins->type = STACK_MP;
11067 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11069 if (mini_is_gsharedvt_klass (cfg, klass)) {
11070 MonoInst *offset_ins;
11072 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11073 dreg = alloc_ireg_mp (cfg);
11074 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11075 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11077 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11079 load->flags |= ins_flag;
11080 if (sp [0]->opcode != OP_LDADDR)
11081 load->flags |= MONO_INST_FAULT;
11095 * We can only support shared generic static
11096 * field access on architectures where the
11097 * trampoline code has been extended to handle
11098 * the generic class init.
11100 #ifndef MONO_ARCH_VTABLE_REG
11101 GENERIC_SHARING_FAILURE (op);
11104 context_used = mini_class_check_context_used (cfg, klass);
11106 ftype = mono_field_get_type (field);
11108 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11111 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11112 * to be called here.
11114 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11115 mono_class_vtable (cfg->domain, klass);
11116 CHECK_TYPELOAD (klass);
11118 mono_domain_lock (cfg->domain);
11119 if (cfg->domain->special_static_fields)
11120 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11121 mono_domain_unlock (cfg->domain);
11123 is_special_static = mono_class_field_is_special_static (field);
11125 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11126 thread_ins = mono_get_thread_intrinsic (cfg);
11130 /* Generate IR to compute the field address */
11131 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11133 * Fast access to TLS data
11134 * Inline version of get_thread_static_data () in
11138 int idx, static_data_reg, array_reg, dreg;
11140 GSHAREDVT_FAILURE (op);
11142 // offset &= 0x7fffffff;
11143 // idx = (offset >> 24) - 1;
11144 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11145 MONO_ADD_INS (cfg->cbb, thread_ins);
11146 static_data_reg = alloc_ireg (cfg);
11147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11149 if (cfg->compile_aot) {
11150 int offset_reg, offset2_reg, idx_reg;
11152 /* For TLS variables, this will return the TLS offset */
11153 EMIT_NEW_SFLDACONST (cfg, ins, field);
11154 offset_reg = ins->dreg;
11155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11156 idx_reg = alloc_ireg (cfg);
11157 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11160 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11161 array_reg = alloc_ireg (cfg);
11162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11163 offset2_reg = alloc_ireg (cfg);
11164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11165 dreg = alloc_ireg (cfg);
11166 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11168 offset = (gsize)addr & 0x7fffffff;
11169 idx = (offset >> 24) - 1;
11171 array_reg = alloc_ireg (cfg);
11172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11173 dreg = alloc_ireg (cfg);
11174 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11176 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11177 (cfg->compile_aot && is_special_static) ||
11178 (context_used && is_special_static)) {
11179 MonoInst *iargs [2];
11181 g_assert (field->parent);
11182 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11183 if (context_used) {
11184 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11185 field, MONO_RGCTX_INFO_CLASS_FIELD);
11187 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11189 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11190 } else if (context_used) {
11191 MonoInst *static_data;
11194 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11195 method->klass->name_space, method->klass->name, method->name,
11196 depth, field->offset);
11199 if (mono_class_needs_cctor_run (klass, method))
11200 emit_generic_class_init (cfg, klass);
11203 * The pointer we're computing here is
11205 * super_info.static_data + field->offset
11207 static_data = emit_get_rgctx_klass (cfg, context_used,
11208 klass, MONO_RGCTX_INFO_STATIC_DATA);
11210 if (mini_is_gsharedvt_klass (cfg, klass)) {
11211 MonoInst *offset_ins;
11213 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11214 dreg = alloc_ireg_mp (cfg);
11215 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11216 } else if (field->offset == 0) {
11219 int addr_reg = mono_alloc_preg (cfg);
11220 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11222 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11223 MonoInst *iargs [2];
11225 g_assert (field->parent);
11226 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11227 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11228 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11230 MonoVTable *vtable = NULL;
11232 if (!cfg->compile_aot)
11233 vtable = mono_class_vtable (cfg->domain, klass);
11234 CHECK_TYPELOAD (klass);
11237 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11238 if (!(g_slist_find (class_inits, klass))) {
11239 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11240 if (cfg->verbose_level > 2)
11241 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11242 class_inits = g_slist_prepend (class_inits, klass);
11245 if (cfg->run_cctors) {
11247 /* This makes so that inline cannot trigger */
11248 /* .cctors: too many apps depend on them */
11249 /* running with a specific order... */
11251 if (! vtable->initialized)
11252 INLINE_FAILURE ("class init");
11253 ex = mono_runtime_class_init_full (vtable, FALSE);
11255 set_exception_object (cfg, ex);
11256 goto exception_exit;
11260 if (cfg->compile_aot)
11261 EMIT_NEW_SFLDACONST (cfg, ins, field);
11264 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11266 EMIT_NEW_PCONST (cfg, ins, addr);
11269 MonoInst *iargs [1];
11270 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11271 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11275 /* Generate IR to do the actual load/store operation */
11277 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11278 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11279 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11282 if (op == CEE_LDSFLDA) {
11283 ins->klass = mono_class_from_mono_type (ftype);
11284 ins->type = STACK_PTR;
11286 } else if (op == CEE_STSFLD) {
11289 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11290 store->flags |= ins_flag;
11292 gboolean is_const = FALSE;
11293 MonoVTable *vtable = NULL;
11294 gpointer addr = NULL;
11296 if (!context_used) {
11297 vtable = mono_class_vtable (cfg->domain, klass);
11298 CHECK_TYPELOAD (klass);
11300 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11301 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11302 int ro_type = ftype->type;
11304 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11305 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11306 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11309 GSHAREDVT_FAILURE (op);
11311 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11314 case MONO_TYPE_BOOLEAN:
11316 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11320 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11323 case MONO_TYPE_CHAR:
11325 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11329 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11334 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11338 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11343 case MONO_TYPE_PTR:
11344 case MONO_TYPE_FNPTR:
11345 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11346 type_to_eval_stack_type ((cfg), field->type, *sp);
11349 case MONO_TYPE_STRING:
11350 case MONO_TYPE_OBJECT:
11351 case MONO_TYPE_CLASS:
11352 case MONO_TYPE_SZARRAY:
11353 case MONO_TYPE_ARRAY:
11354 if (!mono_gc_is_moving ()) {
11355 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11356 type_to_eval_stack_type ((cfg), field->type, *sp);
11364 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11369 case MONO_TYPE_VALUETYPE:
11379 CHECK_STACK_OVF (1);
11381 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11382 load->flags |= ins_flag;
11388 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11389 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11390 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11401 token = read32 (ip + 1);
11402 klass = mini_get_class (method, token, generic_context);
11403 CHECK_TYPELOAD (klass);
11404 if (ins_flag & MONO_INST_VOLATILE) {
11405 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11406 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11408 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11409 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11410 ins->flags |= ins_flag;
11411 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11412 generic_class_is_reference_type (cfg, klass)) {
11413 /* insert call to write barrier */
11414 emit_write_barrier (cfg, sp [0], sp [1]);
11426 const char *data_ptr;
11428 guint32 field_token;
11434 token = read32 (ip + 1);
11436 klass = mini_get_class (method, token, generic_context);
11437 CHECK_TYPELOAD (klass);
11439 context_used = mini_class_check_context_used (cfg, klass);
11441 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11442 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11443 ins->sreg1 = sp [0]->dreg;
11444 ins->type = STACK_I4;
11445 ins->dreg = alloc_ireg (cfg);
11446 MONO_ADD_INS (cfg->cbb, ins);
11447 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11450 if (context_used) {
11451 MonoInst *args [3];
11452 MonoClass *array_class = mono_array_class_get (klass, 1);
11453 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11455 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11458 args [0] = emit_get_rgctx_klass (cfg, context_used,
11459 array_class, MONO_RGCTX_INFO_VTABLE);
11464 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11466 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11468 if (cfg->opt & MONO_OPT_SHARED) {
11469 /* Decompose now to avoid problems with references to the domainvar */
11470 MonoInst *iargs [3];
11472 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11473 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11474 iargs [2] = sp [0];
11476 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11478 /* Decompose later since it is needed by abcrem */
11479 MonoClass *array_type = mono_array_class_get (klass, 1);
11480 mono_class_vtable (cfg->domain, array_type);
11481 CHECK_TYPELOAD (array_type);
11483 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11484 ins->dreg = alloc_ireg_ref (cfg);
11485 ins->sreg1 = sp [0]->dreg;
11486 ins->inst_newa_class = klass;
11487 ins->type = STACK_OBJ;
11488 ins->klass = array_type;
11489 MONO_ADD_INS (cfg->cbb, ins);
11490 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11491 cfg->cbb->has_array_access = TRUE;
11493 /* Needed so mono_emit_load_get_addr () gets called */
11494 mono_get_got_var (cfg);
11504 * we inline/optimize the initialization sequence if possible.
11505 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11506 * for small sizes open code the memcpy
11507 * ensure the rva field is big enough
11509 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11510 MonoMethod *memcpy_method = get_memcpy_method ();
11511 MonoInst *iargs [3];
11512 int add_reg = alloc_ireg_mp (cfg);
11514 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11515 if (cfg->compile_aot) {
11516 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11518 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11520 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11521 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11530 if (sp [0]->type != STACK_OBJ)
11533 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11534 ins->dreg = alloc_preg (cfg);
11535 ins->sreg1 = sp [0]->dreg;
11536 ins->type = STACK_I4;
11537 /* This flag will be inherited by the decomposition */
11538 ins->flags |= MONO_INST_FAULT;
11539 MONO_ADD_INS (cfg->cbb, ins);
11540 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11541 cfg->cbb->has_array_access = TRUE;
11549 if (sp [0]->type != STACK_OBJ)
11552 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11554 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11555 CHECK_TYPELOAD (klass);
11556 /* we need to make sure that this array is exactly the type it needs
11557 * to be for correctness. the wrappers are lax with their usage
11558 * so we need to ignore them here
11560 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11561 MonoClass *array_class = mono_array_class_get (klass, 1);
11562 mini_emit_check_array_type (cfg, sp [0], array_class);
11563 CHECK_TYPELOAD (array_class);
11567 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11572 case CEE_LDELEM_I1:
11573 case CEE_LDELEM_U1:
11574 case CEE_LDELEM_I2:
11575 case CEE_LDELEM_U2:
11576 case CEE_LDELEM_I4:
11577 case CEE_LDELEM_U4:
11578 case CEE_LDELEM_I8:
11580 case CEE_LDELEM_R4:
11581 case CEE_LDELEM_R8:
11582 case CEE_LDELEM_REF: {
11588 if (*ip == CEE_LDELEM) {
11590 token = read32 (ip + 1);
11591 klass = mini_get_class (method, token, generic_context);
11592 CHECK_TYPELOAD (klass);
11593 mono_class_init (klass);
11596 klass = array_access_to_klass (*ip);
11598 if (sp [0]->type != STACK_OBJ)
11601 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11603 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11604 // FIXME-VT: OP_ICONST optimization
11605 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11606 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11607 ins->opcode = OP_LOADV_MEMBASE;
11608 } else if (sp [1]->opcode == OP_ICONST) {
11609 int array_reg = sp [0]->dreg;
11610 int index_reg = sp [1]->dreg;
11611 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11613 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11614 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11616 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11617 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11620 if (*ip == CEE_LDELEM)
11627 case CEE_STELEM_I1:
11628 case CEE_STELEM_I2:
11629 case CEE_STELEM_I4:
11630 case CEE_STELEM_I8:
11631 case CEE_STELEM_R4:
11632 case CEE_STELEM_R8:
11633 case CEE_STELEM_REF:
11638 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11640 if (*ip == CEE_STELEM) {
11642 token = read32 (ip + 1);
11643 klass = mini_get_class (method, token, generic_context);
11644 CHECK_TYPELOAD (klass);
11645 mono_class_init (klass);
11648 klass = array_access_to_klass (*ip);
11650 if (sp [0]->type != STACK_OBJ)
11653 emit_array_store (cfg, klass, sp, TRUE);
11655 if (*ip == CEE_STELEM)
11662 case CEE_CKFINITE: {
11666 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11667 ins->sreg1 = sp [0]->dreg;
11668 ins->dreg = alloc_freg (cfg);
11669 ins->type = STACK_R8;
11670 MONO_ADD_INS (bblock, ins);
11672 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11677 case CEE_REFANYVAL: {
11678 MonoInst *src_var, *src;
11680 int klass_reg = alloc_preg (cfg);
11681 int dreg = alloc_preg (cfg);
11683 GSHAREDVT_FAILURE (*ip);
11686 MONO_INST_NEW (cfg, ins, *ip);
11689 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11690 CHECK_TYPELOAD (klass);
11692 context_used = mini_class_check_context_used (cfg, klass);
11695 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11697 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11698 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11701 if (context_used) {
11702 MonoInst *klass_ins;
11704 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11705 klass, MONO_RGCTX_INFO_KLASS);
11708 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11709 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11711 mini_emit_class_check (cfg, klass_reg, klass);
11713 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11714 ins->type = STACK_MP;
11719 case CEE_MKREFANY: {
11720 MonoInst *loc, *addr;
11722 GSHAREDVT_FAILURE (*ip);
11725 MONO_INST_NEW (cfg, ins, *ip);
11728 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11729 CHECK_TYPELOAD (klass);
11731 context_used = mini_class_check_context_used (cfg, klass);
11733 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11734 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11736 if (context_used) {
11737 MonoInst *const_ins;
11738 int type_reg = alloc_preg (cfg);
11740 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11744 } else if (cfg->compile_aot) {
11745 int const_reg = alloc_preg (cfg);
11746 int type_reg = alloc_preg (cfg);
11748 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11753 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11758 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11759 ins->type = STACK_VTYPE;
11760 ins->klass = mono_defaults.typed_reference_class;
11765 case CEE_LDTOKEN: {
11767 MonoClass *handle_class;
11769 CHECK_STACK_OVF (1);
11772 n = read32 (ip + 1);
11774 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11775 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11776 handle = mono_method_get_wrapper_data (method, n);
11777 handle_class = mono_method_get_wrapper_data (method, n + 1);
11778 if (handle_class == mono_defaults.typehandle_class)
11779 handle = &((MonoClass*)handle)->byval_arg;
11782 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11787 mono_class_init (handle_class);
11788 if (cfg->generic_sharing_context) {
11789 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11790 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11791 /* This case handles ldtoken
11792 of an open type, like for
11795 } else if (handle_class == mono_defaults.typehandle_class) {
11796 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11797 } else if (handle_class == mono_defaults.fieldhandle_class)
11798 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11799 else if (handle_class == mono_defaults.methodhandle_class)
11800 context_used = mini_method_check_context_used (cfg, handle);
11802 g_assert_not_reached ();
11805 if ((cfg->opt & MONO_OPT_SHARED) &&
11806 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11807 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11808 MonoInst *addr, *vtvar, *iargs [3];
11809 int method_context_used;
11811 method_context_used = mini_method_check_context_used (cfg, method);
11813 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11815 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11816 EMIT_NEW_ICONST (cfg, iargs [1], n);
11817 if (method_context_used) {
11818 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11819 method, MONO_RGCTX_INFO_METHOD);
11820 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11822 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11823 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11825 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11829 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11831 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11832 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11833 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11834 (cmethod->klass == mono_defaults.systemtype_class) &&
11835 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11836 MonoClass *tclass = mono_class_from_mono_type (handle);
11838 mono_class_init (tclass);
11839 if (context_used) {
11840 ins = emit_get_rgctx_klass (cfg, context_used,
11841 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11842 } else if (cfg->compile_aot) {
11843 if (method->wrapper_type) {
11844 mono_error_init (&error); //got to do it since there are multiple conditionals below
11845 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11846 /* Special case for static synchronized wrappers */
11847 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11849 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11850 /* FIXME: n is not a normal token */
11852 EMIT_NEW_PCONST (cfg, ins, NULL);
11855 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11858 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11860 ins->type = STACK_OBJ;
11861 ins->klass = cmethod->klass;
11864 MonoInst *addr, *vtvar;
11866 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11868 if (context_used) {
11869 if (handle_class == mono_defaults.typehandle_class) {
11870 ins = emit_get_rgctx_klass (cfg, context_used,
11871 mono_class_from_mono_type (handle),
11872 MONO_RGCTX_INFO_TYPE);
11873 } else if (handle_class == mono_defaults.methodhandle_class) {
11874 ins = emit_get_rgctx_method (cfg, context_used,
11875 handle, MONO_RGCTX_INFO_METHOD);
11876 } else if (handle_class == mono_defaults.fieldhandle_class) {
11877 ins = emit_get_rgctx_field (cfg, context_used,
11878 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11880 g_assert_not_reached ();
11882 } else if (cfg->compile_aot) {
11883 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11885 EMIT_NEW_PCONST (cfg, ins, handle);
11887 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11889 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11899 MONO_INST_NEW (cfg, ins, OP_THROW);
11901 ins->sreg1 = sp [0]->dreg;
11903 bblock->out_of_line = TRUE;
11904 MONO_ADD_INS (bblock, ins);
11905 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11906 MONO_ADD_INS (bblock, ins);
11909 link_bblock (cfg, bblock, end_bblock);
11910 start_new_bblock = 1;
11912 case CEE_ENDFINALLY:
11913 /* mono_save_seq_point_info () depends on this */
11914 if (sp != stack_start)
11915 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11916 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11917 MONO_ADD_INS (bblock, ins);
11919 start_new_bblock = 1;
11922 * Control will leave the method so empty the stack, otherwise
11923 * the next basic block will start with a nonempty stack.
11925 while (sp != stack_start) {
11930 case CEE_LEAVE_S: {
11933 if (*ip == CEE_LEAVE) {
11935 target = ip + 5 + (gint32)read32(ip + 1);
11938 target = ip + 2 + (signed char)(ip [1]);
11941 /* empty the stack */
11942 while (sp != stack_start) {
11947 * If this leave statement is in a catch block, check for a
11948 * pending exception, and rethrow it if necessary.
11949 * We avoid doing this in runtime invoke wrappers, since those are called
11950 * by native code which excepts the wrapper to catch all exceptions.
11952 for (i = 0; i < header->num_clauses; ++i) {
11953 MonoExceptionClause *clause = &header->clauses [i];
11956 * Use <= in the final comparison to handle clauses with multiple
11957 * leave statements, like in bug #78024.
11958 * The ordering of the exception clauses guarantees that we find the
11959 * innermost clause.
11961 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11963 MonoBasicBlock *dont_throw;
11968 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11971 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11973 NEW_BBLOCK (cfg, dont_throw);
11976 * Currently, we always rethrow the abort exception, despite the
11977 * fact that this is not correct. See thread6.cs for an example.
11978 * But propagating the abort exception is more important than
11979 * getting the sematics right.
11981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11982 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11983 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11985 MONO_START_BB (cfg, dont_throw);
11990 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11992 MonoExceptionClause *clause;
11994 for (tmp = handlers; tmp; tmp = tmp->next) {
11995 clause = tmp->data;
11996 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11998 link_bblock (cfg, bblock, tblock);
11999 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12000 ins->inst_target_bb = tblock;
12001 ins->inst_eh_block = clause;
12002 MONO_ADD_INS (bblock, ins);
12003 bblock->has_call_handler = 1;
12004 if (COMPILE_LLVM (cfg)) {
12005 MonoBasicBlock *target_bb;
12008 * Link the finally bblock with the target, since it will
12009 * conceptually branch there.
12010 * FIXME: Have to link the bblock containing the endfinally.
12012 GET_BBLOCK (cfg, target_bb, target);
12013 link_bblock (cfg, tblock, target_bb);
12016 g_list_free (handlers);
12019 MONO_INST_NEW (cfg, ins, OP_BR);
12020 MONO_ADD_INS (bblock, ins);
12021 GET_BBLOCK (cfg, tblock, target);
12022 link_bblock (cfg, bblock, tblock);
12023 ins->inst_target_bb = tblock;
12024 start_new_bblock = 1;
12026 if (*ip == CEE_LEAVE)
12035 * Mono specific opcodes
12037 case MONO_CUSTOM_PREFIX: {
12039 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12043 case CEE_MONO_ICALL: {
12045 MonoJitICallInfo *info;
12047 token = read32 (ip + 2);
12048 func = mono_method_get_wrapper_data (method, token);
12049 info = mono_find_jit_icall_by_addr (func);
12051 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12054 CHECK_STACK (info->sig->param_count);
12055 sp -= info->sig->param_count;
12057 ins = mono_emit_jit_icall (cfg, info->func, sp);
12058 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12062 inline_costs += 10 * num_calls++;
12066 case CEE_MONO_LDPTR_CARD_TABLE: {
12068 gpointer card_mask;
12069 CHECK_STACK_OVF (1);
12071 if (cfg->compile_aot)
12072 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12074 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12078 inline_costs += 10 * num_calls++;
12081 case CEE_MONO_LDPTR_NURSERY_START: {
12084 CHECK_STACK_OVF (1);
12086 if (cfg->compile_aot)
12087 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12089 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12093 inline_costs += 10 * num_calls++;
12096 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12097 CHECK_STACK_OVF (1);
12099 if (cfg->compile_aot)
12100 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12102 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12106 inline_costs += 10 * num_calls++;
12109 case CEE_MONO_LDPTR: {
12112 CHECK_STACK_OVF (1);
12114 token = read32 (ip + 2);
12116 ptr = mono_method_get_wrapper_data (method, token);
12117 EMIT_NEW_PCONST (cfg, ins, ptr);
12120 inline_costs += 10 * num_calls++;
12121 /* Can't embed random pointers into AOT code */
12125 case CEE_MONO_JIT_ICALL_ADDR: {
12126 MonoJitICallInfo *callinfo;
12129 CHECK_STACK_OVF (1);
12131 token = read32 (ip + 2);
12133 ptr = mono_method_get_wrapper_data (method, token);
12134 callinfo = mono_find_jit_icall_by_addr (ptr);
12135 g_assert (callinfo);
12136 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12139 inline_costs += 10 * num_calls++;
12142 case CEE_MONO_ICALL_ADDR: {
12143 MonoMethod *cmethod;
12146 CHECK_STACK_OVF (1);
12148 token = read32 (ip + 2);
12150 cmethod = mono_method_get_wrapper_data (method, token);
12152 if (cfg->compile_aot) {
12153 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12155 ptr = mono_lookup_internal_call (cmethod);
12157 EMIT_NEW_PCONST (cfg, ins, ptr);
12163 case CEE_MONO_VTADDR: {
12164 MonoInst *src_var, *src;
12170 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12171 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12176 case CEE_MONO_NEWOBJ: {
12177 MonoInst *iargs [2];
12179 CHECK_STACK_OVF (1);
12181 token = read32 (ip + 2);
12182 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12183 mono_class_init (klass);
12184 NEW_DOMAINCONST (cfg, iargs [0]);
12185 MONO_ADD_INS (cfg->cbb, iargs [0]);
12186 NEW_CLASSCONST (cfg, iargs [1], klass);
12187 MONO_ADD_INS (cfg->cbb, iargs [1]);
12188 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12190 inline_costs += 10 * num_calls++;
12193 case CEE_MONO_OBJADDR:
12196 MONO_INST_NEW (cfg, ins, OP_MOVE);
12197 ins->dreg = alloc_ireg_mp (cfg);
12198 ins->sreg1 = sp [0]->dreg;
12199 ins->type = STACK_MP;
12200 MONO_ADD_INS (cfg->cbb, ins);
12204 case CEE_MONO_LDNATIVEOBJ:
12206 * Similar to LDOBJ, but instead load the unmanaged
12207 * representation of the vtype to the stack.
12212 token = read32 (ip + 2);
12213 klass = mono_method_get_wrapper_data (method, token);
12214 g_assert (klass->valuetype);
12215 mono_class_init (klass);
12218 MonoInst *src, *dest, *temp;
12221 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12222 temp->backend.is_pinvoke = 1;
12223 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12224 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12226 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12227 dest->type = STACK_VTYPE;
12228 dest->klass = klass;
12234 case CEE_MONO_RETOBJ: {
12236 * Same as RET, but return the native representation of a vtype
12239 g_assert (cfg->ret);
12240 g_assert (mono_method_signature (method)->pinvoke);
12245 token = read32 (ip + 2);
12246 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12248 if (!cfg->vret_addr) {
12249 g_assert (cfg->ret_var_is_local);
12251 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12253 EMIT_NEW_RETLOADA (cfg, ins);
12255 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12257 if (sp != stack_start)
12260 MONO_INST_NEW (cfg, ins, OP_BR);
12261 ins->inst_target_bb = end_bblock;
12262 MONO_ADD_INS (bblock, ins);
12263 link_bblock (cfg, bblock, end_bblock);
12264 start_new_bblock = 1;
12268 case CEE_MONO_CISINST:
12269 case CEE_MONO_CCASTCLASS: {
12274 token = read32 (ip + 2);
12275 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12276 if (ip [1] == CEE_MONO_CISINST)
12277 ins = handle_cisinst (cfg, klass, sp [0]);
12279 ins = handle_ccastclass (cfg, klass, sp [0]);
12285 case CEE_MONO_SAVE_LMF:
12286 case CEE_MONO_RESTORE_LMF:
12287 #ifdef MONO_ARCH_HAVE_LMF_OPS
12288 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12289 MONO_ADD_INS (bblock, ins);
12290 cfg->need_lmf_area = TRUE;
12294 case CEE_MONO_CLASSCONST:
12295 CHECK_STACK_OVF (1);
12297 token = read32 (ip + 2);
12298 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12301 inline_costs += 10 * num_calls++;
12303 case CEE_MONO_NOT_TAKEN:
12304 bblock->out_of_line = TRUE;
12307 case CEE_MONO_TLS: {
12310 CHECK_STACK_OVF (1);
12312 key = (gint32)read32 (ip + 2);
12313 g_assert (key < TLS_KEY_NUM);
12315 ins = mono_create_tls_get (cfg, key);
12317 if (cfg->compile_aot) {
12319 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12320 ins->dreg = alloc_preg (cfg);
12321 ins->type = STACK_PTR;
12323 g_assert_not_reached ();
12326 ins->type = STACK_PTR;
12327 MONO_ADD_INS (bblock, ins);
12332 case CEE_MONO_DYN_CALL: {
12333 MonoCallInst *call;
12335 /* It would be easier to call a trampoline, but that would put an
12336 * extra frame on the stack, confusing exception handling. So
12337 * implement it inline using an opcode for now.
12340 if (!cfg->dyn_call_var) {
12341 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12342 /* prevent it from being register allocated */
12343 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12346 /* Has to use a call inst since it local regalloc expects it */
12347 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12348 ins = (MonoInst*)call;
12350 ins->sreg1 = sp [0]->dreg;
12351 ins->sreg2 = sp [1]->dreg;
12352 MONO_ADD_INS (bblock, ins);
12354 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12357 inline_costs += 10 * num_calls++;
12361 case CEE_MONO_MEMORY_BARRIER: {
12363 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12367 case CEE_MONO_JIT_ATTACH: {
12368 MonoInst *args [16], *domain_ins;
12369 MonoInst *ad_ins, *jit_tls_ins;
12370 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12372 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12374 EMIT_NEW_PCONST (cfg, ins, NULL);
12375 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12377 ad_ins = mono_get_domain_intrinsic (cfg);
12378 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12380 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12381 NEW_BBLOCK (cfg, next_bb);
12382 NEW_BBLOCK (cfg, call_bb);
12384 if (cfg->compile_aot) {
12385 /* AOT code is only used in the root domain */
12386 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12388 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12390 MONO_ADD_INS (cfg->cbb, ad_ins);
12391 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12392 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12394 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12399 MONO_START_BB (cfg, call_bb);
12402 if (cfg->compile_aot) {
12403 /* AOT code is only used in the root domain */
12404 EMIT_NEW_PCONST (cfg, args [0], NULL);
12406 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12408 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12409 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12412 MONO_START_BB (cfg, next_bb);
12418 case CEE_MONO_JIT_DETACH: {
12419 MonoInst *args [16];
12421 /* Restore the original domain */
12422 dreg = alloc_ireg (cfg);
12423 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12424 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12429 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12435 case CEE_PREFIX1: {
12438 case CEE_ARGLIST: {
12439 /* somewhat similar to LDTOKEN */
12440 MonoInst *addr, *vtvar;
12441 CHECK_STACK_OVF (1);
12442 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12444 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12445 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12447 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12448 ins->type = STACK_VTYPE;
12449 ins->klass = mono_defaults.argumenthandle_class;
12459 MonoInst *cmp, *arg1, *arg2;
12467 * The following transforms:
12468 * CEE_CEQ into OP_CEQ
12469 * CEE_CGT into OP_CGT
12470 * CEE_CGT_UN into OP_CGT_UN
12471 * CEE_CLT into OP_CLT
12472 * CEE_CLT_UN into OP_CLT_UN
12474 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12476 MONO_INST_NEW (cfg, ins, cmp->opcode);
12477 cmp->sreg1 = arg1->dreg;
12478 cmp->sreg2 = arg2->dreg;
12479 type_from_op (cfg, cmp, arg1, arg2);
12481 add_widen_op (cfg, cmp, &arg1, &arg2);
12482 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12483 cmp->opcode = OP_LCOMPARE;
12484 else if (arg1->type == STACK_R4)
12485 cmp->opcode = OP_RCOMPARE;
12486 else if (arg1->type == STACK_R8)
12487 cmp->opcode = OP_FCOMPARE;
12489 cmp->opcode = OP_ICOMPARE;
12490 MONO_ADD_INS (bblock, cmp);
12491 ins->type = STACK_I4;
12492 ins->dreg = alloc_dreg (cfg, ins->type);
12493 type_from_op (cfg, ins, arg1, arg2);
12495 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12497 * The backends expect the fceq opcodes to do the
12500 ins->sreg1 = cmp->sreg1;
12501 ins->sreg2 = cmp->sreg2;
12504 MONO_ADD_INS (bblock, ins);
12510 MonoInst *argconst;
12511 MonoMethod *cil_method;
12513 CHECK_STACK_OVF (1);
12515 n = read32 (ip + 2);
12516 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12517 if (!cmethod || mono_loader_get_last_error ())
12519 mono_class_init (cmethod->klass);
12521 mono_save_token_info (cfg, image, n, cmethod);
12523 context_used = mini_method_check_context_used (cfg, cmethod);
12525 cil_method = cmethod;
12526 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12527 METHOD_ACCESS_FAILURE (method, cil_method);
12529 if (mono_security_cas_enabled ()) {
12530 if (check_linkdemand (cfg, method, cmethod))
12531 INLINE_FAILURE ("linkdemand");
12532 CHECK_CFG_EXCEPTION;
12533 } else if (mono_security_core_clr_enabled ()) {
12534 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12538 * Optimize the common case of ldftn+delegate creation
12540 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12541 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12542 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12543 MonoInst *target_ins, *handle_ins;
12544 MonoMethod *invoke;
12545 int invoke_context_used;
12547 invoke = mono_get_delegate_invoke (ctor_method->klass);
12548 if (!invoke || !mono_method_signature (invoke))
12551 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12553 target_ins = sp [-1];
12555 if (mono_security_core_clr_enabled ())
12556 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12558 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12559 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12560 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12562 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12566 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12567 /* FIXME: SGEN support */
12568 if (invoke_context_used == 0) {
12570 if (cfg->verbose_level > 3)
12571 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12572 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12575 CHECK_CFG_EXCEPTION;
12586 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12587 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12591 inline_costs += 10 * num_calls++;
12594 case CEE_LDVIRTFTN: {
12595 MonoInst *args [2];
12599 n = read32 (ip + 2);
12600 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12601 if (!cmethod || mono_loader_get_last_error ())
12603 mono_class_init (cmethod->klass);
12605 context_used = mini_method_check_context_used (cfg, cmethod);
12607 if (mono_security_cas_enabled ()) {
12608 if (check_linkdemand (cfg, method, cmethod))
12609 INLINE_FAILURE ("linkdemand");
12610 CHECK_CFG_EXCEPTION;
12611 } else if (mono_security_core_clr_enabled ()) {
12612 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12616 * Optimize the common case of ldvirtftn+delegate creation
12618 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12619 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12620 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12621 MonoInst *target_ins, *handle_ins;
12622 MonoMethod *invoke;
12623 int invoke_context_used;
12625 invoke = mono_get_delegate_invoke (ctor_method->klass);
12626 if (!invoke || !mono_method_signature (invoke))
12629 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12631 target_ins = sp [-1];
12633 if (mono_security_core_clr_enabled ())
12634 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12636 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12637 /* FIXME: SGEN support */
12638 if (invoke_context_used == 0) {
12640 if (cfg->verbose_level > 3)
12641 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12642 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12645 CHECK_CFG_EXCEPTION;
12659 args [1] = emit_get_rgctx_method (cfg, context_used,
12660 cmethod, MONO_RGCTX_INFO_METHOD);
12663 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12665 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12668 inline_costs += 10 * num_calls++;
12672 CHECK_STACK_OVF (1);
12674 n = read16 (ip + 2);
12676 EMIT_NEW_ARGLOAD (cfg, ins, n);
12681 CHECK_STACK_OVF (1);
12683 n = read16 (ip + 2);
12685 NEW_ARGLOADA (cfg, ins, n);
12686 MONO_ADD_INS (cfg->cbb, ins);
12694 n = read16 (ip + 2);
12696 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12698 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12702 CHECK_STACK_OVF (1);
12704 n = read16 (ip + 2);
12706 EMIT_NEW_LOCLOAD (cfg, ins, n);
12711 unsigned char *tmp_ip;
12712 CHECK_STACK_OVF (1);
12714 n = read16 (ip + 2);
12717 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12723 EMIT_NEW_LOCLOADA (cfg, ins, n);
12732 n = read16 (ip + 2);
12734 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12736 emit_stloc_ir (cfg, sp, header, n);
12743 if (sp != stack_start)
12745 if (cfg->method != method)
12747 * Inlining this into a loop in a parent could lead to
12748 * stack overflows which is different behavior than the
12749 * non-inlined case, thus disable inlining in this case.
12751 INLINE_FAILURE("localloc");
12753 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12754 ins->dreg = alloc_preg (cfg);
12755 ins->sreg1 = sp [0]->dreg;
12756 ins->type = STACK_PTR;
12757 MONO_ADD_INS (cfg->cbb, ins);
12759 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12761 ins->flags |= MONO_INST_INIT;
12766 case CEE_ENDFILTER: {
12767 MonoExceptionClause *clause, *nearest;
12772 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12774 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12775 ins->sreg1 = (*sp)->dreg;
12776 MONO_ADD_INS (bblock, ins);
12777 start_new_bblock = 1;
12781 for (cc = 0; cc < header->num_clauses; ++cc) {
12782 clause = &header->clauses [cc];
12783 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12784 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12785 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12788 g_assert (nearest);
12789 if ((ip - header->code) != nearest->handler_offset)
12794 case CEE_UNALIGNED_:
12795 ins_flag |= MONO_INST_UNALIGNED;
12796 /* FIXME: record alignment? we can assume 1 for now */
12800 case CEE_VOLATILE_:
12801 ins_flag |= MONO_INST_VOLATILE;
12805 ins_flag |= MONO_INST_TAILCALL;
12806 cfg->flags |= MONO_CFG_HAS_TAIL;
12807 /* Can't inline tail calls at this time */
12808 inline_costs += 100000;
12815 token = read32 (ip + 2);
12816 klass = mini_get_class (method, token, generic_context);
12817 CHECK_TYPELOAD (klass);
12818 if (generic_class_is_reference_type (cfg, klass))
12819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12821 mini_emit_initobj (cfg, *sp, NULL, klass);
12825 case CEE_CONSTRAINED_:
12827 token = read32 (ip + 2);
12828 constrained_class = mini_get_class (method, token, generic_context);
12829 CHECK_TYPELOAD (constrained_class);
12833 case CEE_INITBLK: {
12834 MonoInst *iargs [3];
12838 /* Skip optimized paths for volatile operations. */
12839 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12840 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12841 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12842 /* emit_memset only works when val == 0 */
12843 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12846 iargs [0] = sp [0];
12847 iargs [1] = sp [1];
12848 iargs [2] = sp [2];
12849 if (ip [1] == CEE_CPBLK) {
12851 * FIXME: It's unclear whether we should be emitting both the acquire
12852 * and release barriers for cpblk. It is technically both a load and
12853 * store operation, so it seems like that's the sensible thing to do.
12855 * FIXME: We emit full barriers on both sides of the operation for
12856 * simplicity. We should have a separate atomic memcpy method instead.
12858 MonoMethod *memcpy_method = get_memcpy_method ();
12860 if (ins_flag & MONO_INST_VOLATILE)
12861 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12863 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12864 call->flags |= ins_flag;
12866 if (ins_flag & MONO_INST_VOLATILE)
12867 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12869 MonoMethod *memset_method = get_memset_method ();
12870 if (ins_flag & MONO_INST_VOLATILE) {
12871 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12872 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12874 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12875 call->flags |= ins_flag;
12886 ins_flag |= MONO_INST_NOTYPECHECK;
12888 ins_flag |= MONO_INST_NORANGECHECK;
12889 /* we ignore the no-nullcheck for now since we
12890 * really do it explicitly only when doing callvirt->call
12894 case CEE_RETHROW: {
12896 int handler_offset = -1;
12898 for (i = 0; i < header->num_clauses; ++i) {
12899 MonoExceptionClause *clause = &header->clauses [i];
12900 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12901 handler_offset = clause->handler_offset;
12906 bblock->flags |= BB_EXCEPTION_UNSAFE;
12908 if (handler_offset == -1)
12911 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12912 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12913 ins->sreg1 = load->dreg;
12914 MONO_ADD_INS (bblock, ins);
12916 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12917 MONO_ADD_INS (bblock, ins);
12920 link_bblock (cfg, bblock, end_bblock);
12921 start_new_bblock = 1;
12929 CHECK_STACK_OVF (1);
12931 token = read32 (ip + 2);
12932 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12933 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12936 val = mono_type_size (type, &ialign);
12938 MonoClass *klass = mini_get_class (method, token, generic_context);
12939 CHECK_TYPELOAD (klass);
12941 val = mono_type_size (&klass->byval_arg, &ialign);
12943 if (mini_is_gsharedvt_klass (cfg, klass))
12944 GSHAREDVT_FAILURE (*ip);
12946 EMIT_NEW_ICONST (cfg, ins, val);
12951 case CEE_REFANYTYPE: {
12952 MonoInst *src_var, *src;
12954 GSHAREDVT_FAILURE (*ip);
12960 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12962 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12963 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12964 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12969 case CEE_READONLY_:
12982 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12992 g_warning ("opcode 0x%02x not handled", *ip);
12996 if (start_new_bblock != 1)
12999 bblock->cil_length = ip - bblock->cil_code;
13000 if (bblock->next_bb) {
13001 /* This could already be set because of inlining, #693905 */
13002 MonoBasicBlock *bb = bblock;
13004 while (bb->next_bb)
13006 bb->next_bb = end_bblock;
13008 bblock->next_bb = end_bblock;
13011 if (cfg->method == method && cfg->domainvar) {
13013 MonoInst *get_domain;
13015 cfg->cbb = init_localsbb;
13017 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13018 MONO_ADD_INS (cfg->cbb, get_domain);
13020 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13022 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13023 MONO_ADD_INS (cfg->cbb, store);
13026 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13027 if (cfg->compile_aot)
13028 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13029 mono_get_got_var (cfg);
13032 if (cfg->method == method && cfg->got_var)
13033 mono_emit_load_got_addr (cfg);
13035 if (init_localsbb) {
13036 cfg->cbb = init_localsbb;
13038 for (i = 0; i < header->num_locals; ++i) {
13039 emit_init_local (cfg, i, header->locals [i], init_locals);
13043 if (cfg->init_ref_vars && cfg->method == method) {
13044 /* Emit initialization for ref vars */
13045 // FIXME: Avoid duplication initialization for IL locals.
13046 for (i = 0; i < cfg->num_varinfo; ++i) {
13047 MonoInst *ins = cfg->varinfo [i];
13049 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13050 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13054 if (cfg->lmf_var && cfg->method == method) {
13055 cfg->cbb = init_localsbb;
13056 emit_push_lmf (cfg);
13059 cfg->cbb = init_localsbb;
13060 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13063 MonoBasicBlock *bb;
13066 * Make seq points at backward branch targets interruptable.
13068 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13069 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13070 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13073 /* Add a sequence point for method entry/exit events */
13074 if (seq_points && cfg->gen_seq_points_debug_data) {
13075 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13076 MONO_ADD_INS (init_localsbb, ins);
13077 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13078 MONO_ADD_INS (cfg->bb_exit, ins);
13082 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13083 * the code they refer to was dead (#11880).
13085 if (sym_seq_points) {
13086 for (i = 0; i < header->code_size; ++i) {
13087 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13090 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13091 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13098 if (cfg->method == method) {
13099 MonoBasicBlock *bb;
13100 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13101 bb->region = mono_find_block_region (cfg, bb->real_offset);
13103 mono_create_spvar_for_region (cfg, bb->region);
13104 if (cfg->verbose_level > 2)
13105 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13109 if (inline_costs < 0) {
13112 /* Method is too large */
13113 mname = mono_method_full_name (method, TRUE);
13114 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13115 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13119 if ((cfg->verbose_level > 2) && (cfg->method == method))
13120 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13125 g_assert (!mono_error_ok (&cfg->error));
13129 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13133 set_exception_type_from_invalid_il (cfg, method, ip);
13137 g_slist_free (class_inits);
13138 mono_basic_block_free (original_bb);
13139 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13140 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13141 if (cfg->exception_type)
13144 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register opcode (OP_STORE*_MEMBASE_REG) to its
 * store-immediate counterpart (OP_STORE*_MEMBASE_IMM), for use when the
 * source register is known to hold a constant.  Any opcode without an
 * immediate form trips the assertion below.
 */
13148 store_membase_reg_to_store_membase_imm (int opcode)
13151 case OP_STORE_MEMBASE_REG:
13152 return OP_STORE_MEMBASE_IMM;
13153 case OP_STOREI1_MEMBASE_REG:
13154 return OP_STOREI1_MEMBASE_IMM;
13155 case OP_STOREI2_MEMBASE_REG:
13156 return OP_STOREI2_MEMBASE_IMM;
13157 case OP_STOREI4_MEMBASE_REG:
13158 return OP_STOREI4_MEMBASE_IMM;
13159 case OP_STOREI8_MEMBASE_REG:
13160 return OP_STOREI8_MEMBASE_IMM;
13162 g_assert_not_reached ();	/* caller must pass one of the *_MEMBASE_REG stores above */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE (e.g. OP_IADD -> OP_IADD_IMM),
 * used when one operand is known to be a constant.  Mappings fall into
 * groups: 32-bit integer ALU/shift ops, 64-bit (long) ALU/shift ops,
 * compares, register-to-memory stores, and x86/amd64-specific membase and
 * push forms guarded by target #ifdefs.
 */
13169 mono_op_to_op_imm (int opcode)
13173 return OP_IADD_IMM;
13175 return OP_ISUB_IMM;
13177 return OP_IDIV_IMM;
13179 return OP_IDIV_UN_IMM;
13181 return OP_IREM_IMM;
13183 return OP_IREM_UN_IMM;
13185 return OP_IMUL_IMM;
13187 return OP_IAND_IMM;
13191 return OP_IXOR_IMM;
13193 return OP_ISHL_IMM;
13195 return OP_ISHR_IMM;
13197 return OP_ISHR_UN_IMM;
13200 return OP_LADD_IMM;
13202 return OP_LSUB_IMM;
13204 return OP_LAND_IMM;
13208 return OP_LXOR_IMM;
13210 return OP_LSHL_IMM;
13212 return OP_LSHR_IMM;
13214 return OP_LSHR_UN_IMM;
13215 #if SIZEOF_REGISTER == 8
/* long remainder-by-immediate is only available on 64-bit register targets */
13217 return OP_LREM_IMM;
13221 return OP_COMPARE_IMM;
13223 return OP_ICOMPARE_IMM;
13225 return OP_LCOMPARE_IMM;
13227 case OP_STORE_MEMBASE_REG:
13228 return OP_STORE_MEMBASE_IMM;
13229 case OP_STOREI1_MEMBASE_REG:
13230 return OP_STOREI1_MEMBASE_IMM;
13231 case OP_STOREI2_MEMBASE_REG:
13232 return OP_STOREI2_MEMBASE_IMM;
13233 case OP_STOREI4_MEMBASE_REG:
13234 return OP_STOREI4_MEMBASE_IMM;
13236 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13238 return OP_X86_PUSH_IMM;
13239 case OP_X86_COMPARE_MEMBASE_REG:
13240 return OP_X86_COMPARE_MEMBASE_IMM;
13242 #if defined(TARGET_AMD64)
13243 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13244 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13246 case OP_VOIDCALL_REG:
13247 return OP_VOIDCALL;
13255 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding IR
 * load-from-memory opcode (OP_LOAD*_MEMBASE).  CEE_LDIND_REF maps to the
 * pointer-sized OP_LOAD_MEMBASE; an unrecognized opcode trips the assertion.
 */
13262 ldind_to_load_membase (int opcode)
13266 return OP_LOADI1_MEMBASE;
13268 return OP_LOADU1_MEMBASE;
13270 return OP_LOADI2_MEMBASE;
13272 return OP_LOADU2_MEMBASE;
13274 return OP_LOADI4_MEMBASE;
13276 return OP_LOADU4_MEMBASE;
13278 return OP_LOAD_MEMBASE;
13279 case CEE_LDIND_REF:
13280 return OP_LOAD_MEMBASE;	/* object references use the pointer-sized load */
13282 return OP_LOADI8_MEMBASE;
13284 return OP_LOADR4_MEMBASE;
13286 return OP_LOADR8_MEMBASE;
13288 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding IR
 * store-to-memory opcode (OP_STORE*_MEMBASE_REG).  CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG; an unrecognized opcode trips the
 * assertion.
 */
13295 stind_to_store_membase (int opcode)
13299 return OP_STOREI1_MEMBASE_REG;
13301 return OP_STOREI2_MEMBASE_REG;
13303 return OP_STOREI4_MEMBASE_REG;
13305 case CEE_STIND_REF:
13306 return OP_STORE_MEMBASE_REG;	/* object references use the pointer-sized store */
13308 return OP_STOREI8_MEMBASE_REG;
13310 return OP_STORER4_MEMBASE_REG;
13312 return OP_STORER8_MEMBASE_REG;
13314 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM), only on targets that support it (currently x86/amd64 —
 * see the FIXME about adding a proper arch macro).  The 8-byte variant is
 * additionally gated on 64-bit registers.
 */
13321 mono_load_membase_to_load_mem (int opcode)
13323 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13324 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13326 case OP_LOAD_MEMBASE:
13327 return OP_LOAD_MEM;
13328 case OP_LOADU1_MEMBASE:
13329 return OP_LOADU1_MEM;
13330 case OP_LOADU2_MEMBASE:
13331 return OP_LOADU2_MEM;
13332 case OP_LOADI4_MEMBASE:
13333 return OP_LOADI4_MEM;
13334 case OP_LOADU4_MEMBASE:
13335 return OP_LOADU4_MEM;
13336 #if SIZEOF_REGISTER == 8
13337 case OP_LOADI8_MEMBASE:
13338 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   If the destination of an ALU op is immediately stored back to memory
 * (STORE_OPCODE), return an x86/amd64 read-modify-write form of the op that
 * operates directly on the memory operand (e.g. OP_X86_ADD_MEMBASE_REG),
 * folding the load/op/store sequence into one instruction.  Only applies
 * when the store is a full-width (or, on amd64, 4/8-byte) register store;
 * otherwise no membase form is returned.
 */
13347 op_to_op_dest_membase (int store_opcode, int opcode)
13349 #if defined(TARGET_X86)
/* x86: only pointer-sized or 4-byte register stores can be folded */
13350 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13355 return OP_X86_ADD_MEMBASE_REG;
13357 return OP_X86_SUB_MEMBASE_REG;
13359 return OP_X86_AND_MEMBASE_REG;
13361 return OP_X86_OR_MEMBASE_REG;
13363 return OP_X86_XOR_MEMBASE_REG;
13366 return OP_X86_ADD_MEMBASE_IMM;
13369 return OP_X86_SUB_MEMBASE_IMM;
13372 return OP_X86_AND_MEMBASE_IMM;
13375 return OP_X86_OR_MEMBASE_IMM;
13378 return OP_X86_XOR_MEMBASE_IMM;
13384 #if defined(TARGET_AMD64)
/* amd64: 8-byte register stores can also be folded */
13385 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13390 return OP_X86_ADD_MEMBASE_REG;
13392 return OP_X86_SUB_MEMBASE_REG;
13394 return OP_X86_AND_MEMBASE_REG;
13396 return OP_X86_OR_MEMBASE_REG;
13398 return OP_X86_XOR_MEMBASE_REG;
13400 return OP_X86_ADD_MEMBASE_IMM;
13402 return OP_X86_SUB_MEMBASE_IMM;
13404 return OP_X86_AND_MEMBASE_IMM;
13406 return OP_X86_OR_MEMBASE_IMM;
13408 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants of the same folding */
13410 return OP_AMD64_ADD_MEMBASE_REG;
13412 return OP_AMD64_SUB_MEMBASE_REG;
13414 return OP_AMD64_AND_MEMBASE_REG;
13416 return OP_AMD64_OR_MEMBASE_REG;
13418 return OP_AMD64_XOR_MEMBASE_REG;
13421 return OP_AMD64_ADD_MEMBASE_IMM;
13424 return OP_AMD64_SUB_MEMBASE_IMM;
13427 return OP_AMD64_AND_MEMBASE_IMM;
13430 return OP_AMD64_OR_MEMBASE_IMM;
13433 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   If a condition-set result is stored as a single byte
 * (OP_STOREI1_MEMBASE_REG), return the x86/amd64 setcc-to-memory form
 * (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE) so the set+store pair
 * becomes one instruction.
 */
13443 op_to_op_store_membase (int store_opcode, int opcode)
13445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13448 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13449 return OP_X86_SETEQ_MEMBASE;
13451 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13452 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   If the first source operand of OPCODE comes straight from a memory load
 * (LOAD_OPCODE), return an x86/amd64 form of the op that reads the memory
 * operand directly (push/compare membase variants), folding away the load.
 * Load widths are checked per target; on amd64 the ilp32 (x32) ABI treats
 * OP_LOAD_MEMBASE as 4 bytes, hence the extra __mono_ilp32__ cases.
 */
13460 op_to_op_src1_membase (int load_opcode, int opcode)
13463 /* FIXME: This has sign extension issues */
13465 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13466 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 4-byte loads can be folded below */
13469 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13474 return OP_X86_PUSH_MEMBASE;
13475 case OP_COMPARE_IMM:
13476 case OP_ICOMPARE_IMM:
13477 return OP_X86_COMPARE_MEMBASE_IMM;
13480 return OP_X86_COMPARE_MEMBASE_REG;
13484 #ifdef TARGET_AMD64
13485 /* FIXME: This has sign extension issues */
13487 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13488 return OP_X86_COMPARE_MEMBASE8_IMM;
13493 #ifdef __mono_ilp32__
13494 if (load_opcode == OP_LOADI8_MEMBASE)
13496 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13498 return OP_X86_PUSH_MEMBASE;
13500 /* FIXME: This only works for 32 bit immediates
13501 case OP_COMPARE_IMM:
13502 case OP_LCOMPARE_IMM:
13503 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13504 return OP_AMD64_COMPARE_MEMBASE_IMM;
13506 case OP_ICOMPARE_IMM:
13507 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13508 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13512 #ifdef __mono_ilp32__
13513 if (load_opcode == OP_LOAD_MEMBASE)
13514 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13515 if (load_opcode == OP_LOADI8_MEMBASE)
13517 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13519 return OP_AMD64_COMPARE_MEMBASE_REG;
13522 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13523 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   If the second source operand of OPCODE comes straight from a memory
 * load (LOAD_OPCODE), return an x86/amd64 reg-op-membase form
 * (e.g. OP_X86_ADD_REG_MEMBASE) that folds the load into the ALU/compare
 * instruction.  As in op_to_op_src1_membase, the amd64 path distinguishes
 * 4-byte and 8-byte loads, with __mono_ilp32__ (x32) treating
 * OP_LOAD_MEMBASE as 4 bytes.
 */
13532 op_to_op_src2_membase (int load_opcode, int opcode)
13535 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13541 return OP_X86_COMPARE_REG_MEMBASE;
13543 return OP_X86_ADD_REG_MEMBASE;
13545 return OP_X86_SUB_REG_MEMBASE;
13547 return OP_X86_AND_REG_MEMBASE;
13549 return OP_X86_OR_REG_MEMBASE;
13551 return OP_X86_XOR_REG_MEMBASE;
13555 #ifdef TARGET_AMD64
13556 #ifdef __mono_ilp32__
13557 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13559 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13563 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13565 return OP_X86_ADD_REG_MEMBASE;
13567 return OP_X86_SUB_REG_MEMBASE;
13569 return OP_X86_AND_REG_MEMBASE;
13571 return OP_X86_OR_REG_MEMBASE;
13573 return OP_X86_XOR_REG_MEMBASE;
13575 #ifdef __mono_ilp32__
13576 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13578 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit loads fold into the AMD64 long reg-membase forms */
13583 return OP_AMD64_COMPARE_REG_MEMBASE;
13585 return OP_AMD64_ADD_REG_MEMBASE;
13587 return OP_AMD64_SUB_REG_MEMBASE;
13589 return OP_AMD64_AND_REG_MEMBASE;
13591 return OP_AMD64_OR_REG_MEMBASE;
13593 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to produce an immediate form for
 * opcodes that are software-emulated on this target (long shifts on 32-bit
 * registers, and mul/div/rem when MONO_ARCH_EMULATE_MUL_DIV / _DIV are
 * defined), since the emulation path cannot take an immediate operand.
 * Everything else falls through to mono_op_to_op_imm ().
 */
13602 mono_op_to_op_imm_noemul (int opcode)
13605 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13611 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13618 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13623 return mono_op_to_op_imm (opcode);
13628 * mono_handle_global_vregs:
13630 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * for them, so the register allocator can see them across basic blocks.
 * Afterwards, variables that turn out to be used in only a single bblock
 * are demoted back to local vregs, and the varinfo/vars tables are
 * compacted to drop dead entries.
 */
13634 mono_handle_global_vregs (MonoCompile *cfg)
13636 gint32 *vreg_to_bb;
13637 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] tracks usage: 0 = unseen, block_num + 1 = seen in one
 * block, -1 = seen in multiple blocks.
 * NOTE(review): the element size is sizeof (gint32*) although the array
 * holds gint32, and the "+ 1" is outside the multiplication — this looks
 * like it was meant to be sizeof (gint32) * (cfg->next_vreg + 1).  It
 * over-allocates on 64-bit targets rather than under-allocating, so it is
 * benign there, but worth confirming against upstream.
 */
13640 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13642 #ifdef MONO_ARCH_SIMD_INTRINSICS
13643 if (cfg->uses_simd_intrinsics)
13644 mono_simd_simplify_indirection (cfg);
13647 /* Find local vregs used in more than one bb */
13648 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13649 MonoInst *ins = bb->code;
13650 int block_num = bb->block_num;
13652 if (cfg->verbose_level > 2)
13653 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13656 for (; ins; ins = ins->next) {
13657 const char *spec = INS_INFO (ins->opcode);
13658 int regtype = 0, regindex;
13661 if (G_UNLIKELY (cfg->verbose_level > 2))
13662 mono_print_ins (ins);
/* by this pass only machine-level opcodes should remain, no CIL opcodes */
13664 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit each register slot of the instruction: dest, src1, src2, src3 */
13666 for (regindex = 0; regindex < 4; regindex ++) {
13669 if (regindex == 0) {
13670 regtype = spec [MONO_INST_DEST];
13671 if (regtype == ' ')
13674 } else if (regindex == 1) {
13675 regtype = spec [MONO_INST_SRC1];
13676 if (regtype == ' ')
13679 } else if (regindex == 2) {
13680 regtype = spec [MONO_INST_SRC2];
13681 if (regtype == ' ')
13684 } else if (regindex == 3) {
13685 regtype = spec [MONO_INST_SRC3];
13686 if (regtype == ' ')
13691 #if SIZEOF_REGISTER == 4
13692 /* In the LLVM case, the long opcodes are not decomposed */
13693 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13695 * Since some instructions reference the original long vreg,
13696 * and some reference the two component vregs, it is quite hard
13697 * to determine when it needs to be global. So be conservative.
13699 if (!get_vreg_to_inst (cfg, vreg)) {
13700 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13702 if (cfg->verbose_level > 2)
13703 printf ("LONG VREG R%d made global.\n", vreg);
13707 * Make the component vregs volatile since the optimizations can
13708 * get confused otherwise.
13710 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13711 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13715 g_assert (vreg != -1);
13717 prev_bb = vreg_to_bb [vreg];
13718 if (prev_bb == 0) {
13719 /* 0 is a valid block num */
13720 vreg_to_bb [vreg] = block_num + 1;
13721 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers below MONO_MAX_IREGS/FREGS are never made global */
13722 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13725 if (!get_vreg_to_inst (cfg, vreg)) {
13726 if (G_UNLIKELY (cfg->verbose_level > 2))
13727 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* allocate a variable of the appropriate type for the cross-bb vreg */
13731 if (vreg_is_ref (cfg, vreg))
13732 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13734 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13737 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13740 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13743 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13746 g_assert_not_reached ();
13750 /* Flag as having been used in more than one bb */
13751 vreg_to_bb [vreg] = -1;
13757 /* If a variable is used in only one bblock, convert it into a local vreg */
13758 for (i = 0; i < cfg->num_varinfo; i++) {
13759 MonoInst *var = cfg->varinfo [i];
13760 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13762 switch (var->type) {
13768 #if SIZEOF_REGISTER == 8
13771 #if !defined(TARGET_X86)
13772 /* Enabling this screws up the fp stack on x86 */
13775 if (mono_arch_is_soft_float ())
13778 /* Arguments are implicitly global */
13779 /* Putting R4 vars into registers doesn't work currently */
13780 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13781 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13783 * Make that the variable's liveness interval doesn't contain a call, since
13784 * that would cause the lvreg to be spilled, making the whole optimization
13787 /* This is too slow for JIT compilation */
13789 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13791 int def_index, call_index, ins_index;
13792 gboolean spilled = FALSE;
13797 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13798 const char *spec = INS_INFO (ins->opcode);
13800 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13801 def_index = ins_index;
/*
 * NOTE(review): both sides of this || test SRC1/sreg1; the second clause
 * was presumably meant to check SRC2/sreg2 — looks like a copy-paste bug
 * in the liveness check.  TODO confirm against upstream before fixing.
 */
13803 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13804 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13805 if (call_index > def_index) {
13811 if (MONO_IS_CALL (ins))
13812 call_index = ins_index;
13822 if (G_UNLIKELY (cfg->verbose_level > 2))
13823 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and detach it from its vreg */
13824 var->flags |= MONO_INST_IS_DEAD;
13825 cfg->vreg_to_inst [var->dreg] = NULL;
13832 * Compress the varinfo and vars tables so the liveness computation is faster and
13833 * takes up less space.
13836 for (i = 0; i < cfg->num_varinfo; ++i) {
13837 MonoInst *var = cfg->varinfo [i];
13838 if (pos < i && cfg->locals_start == i)
13839 cfg->locals_start = pos;
13840 if (!(var->flags & MONO_INST_IS_DEAD)) {
13842 cfg->varinfo [pos] = cfg->varinfo [i];
13843 cfg->varinfo [pos]->inst_c0 = pos;
13844 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13845 cfg->vars [pos].idx = pos;
13846 #if SIZEOF_REGISTER == 4
13847 if (cfg->varinfo [pos]->type == STACK_I8) {
13848 /* Modify the two component vars too */
13851 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13852 var1->inst_c0 = pos;
13853 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13854 var1->inst_c0 = pos;
13861 cfg->num_varinfo = pos;
13862 if (cfg->locals_start > cfg->num_varinfo)
13863 cfg->locals_start = cfg->num_varinfo;
13867 * mono_spill_global_vars:
13869 * Generate spill code for variables which are not allocated to registers,
13870 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13871 * code is generated which could be optimized by the local optimization passes.
13874 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13876 MonoBasicBlock *bb;
13878 int orig_next_vreg;
13879 guint32 *vreg_to_lvreg;
13881 guint32 i, lvregs_len;
13882 gboolean dest_has_lvreg = FALSE;
13883 guint32 stacktypes [128];
13884 MonoInst **live_range_start, **live_range_end;
13885 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13886 int *gsharedvt_vreg_to_idx = NULL;
13888 *need_local_opts = FALSE;
13890 memset (spec2, 0, sizeof (spec2));
13892 /* FIXME: Move this function to mini.c */
/* Map ins-spec regtype characters to stack types used by alloc_dreg (). */
13893 stacktypes ['i'] = STACK_PTR;
13894 stacktypes ['l'] = STACK_I8;
13895 stacktypes ['f'] = STACK_R8;
13896 #ifdef MONO_ARCH_SIMD_INTRINSICS
13897 stacktypes ['x'] = STACK_VTYPE;
13900 #if SIZEOF_REGISTER == 4
13901 /* Create MonoInsts for longs */
13902 for (i = 0; i < cfg->num_varinfo; i++) {
13903 MonoInst *ins = cfg->varinfo [i];
13905 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13906 switch (ins->type) {
13911 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13914 g_assert (ins->opcode == OP_REGOFFSET);
/* On 32 bit, dreg + 1 / dreg + 2 are the LS/MS component vregs of a long. */
13916 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13918 tree->opcode = OP_REGOFFSET;
13919 tree->inst_basereg = ins->inst_basereg;
13920 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13922 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13924 tree->opcode = OP_REGOFFSET;
13925 tree->inst_basereg = ins->inst_basereg;
13926 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13936 if (cfg->compute_gc_maps) {
13937 /* registers need liveness info even for !non refs */
13938 for (i = 0; i < cfg->num_varinfo; i++) {
13939 MonoInst *ins = cfg->varinfo [i];
13941 if (ins->opcode == OP_REGVAR)
13942 ins->flags |= MONO_INST_GC_TRACK;
13946 if (cfg->gsharedvt) {
/* For variable-sized (gsharedvt) vars, record either a runtime-info slot index
 * (locals, stored as idx + 1 so 0 means "none") or -1 (args passed by ref). */
13947 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13949 for (i = 0; i < cfg->num_varinfo; ++i) {
13950 MonoInst *ins = cfg->varinfo [i];
13953 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13954 if (i >= cfg->locals_start) {
13956 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13957 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13958 ins->opcode = OP_GSHAREDVT_LOCAL;
13959 ins->inst_imm = idx;
13962 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13963 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13969 /* FIXME: widening and truncation */
13972 * As an optimization, when a variable allocated to the stack is first loaded into
13973 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13974 * the variable again.
13976 orig_next_vreg = cfg->next_vreg;
13977 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13978 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13982 * These arrays contain the first and last instructions accessing a given
13984 * Since we emit bblocks in the same order we process them here, and we
13985 * don't split live ranges, these will precisely describe the live range of
13986 * the variable, i.e. the instruction range where a valid value can be found
13987 * in the variables location.
13988 * The live range is computed using the liveness info computed by the liveness pass.
13989 * We can't use vmv->range, since that is an abstract live range, and we need
13990 * one which is instruction precise.
13991 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13993 /* FIXME: Only do this if debugging info is requested */
13994 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13995 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13996 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13997 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13999 /* Add spill loads/stores */
14000 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14003 if (cfg->verbose_level > 2)
14004 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14006 /* Clear vreg_to_lvreg array */
14007 for (i = 0; i < lvregs_len; i++)
14008 vreg_to_lvreg [lvregs [i]] = 0;
14012 MONO_BB_FOR_EACH_INS (bb, ins) {
14013 const char *spec = INS_INFO (ins->opcode);
14014 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14015 gboolean store, no_lvreg;
14016 int sregs [MONO_MAX_SRC_REGS];
14018 if (G_UNLIKELY (cfg->verbose_level > 2))
14019 mono_print_ins (ins);
14021 if (ins->opcode == OP_NOP)
14025 * We handle LDADDR here as well, since it can only be decomposed
14026 * when variable addresses are known.
14028 if (ins->opcode == OP_LDADDR) {
14029 MonoInst *var = ins->inst_p0;
14031 if (var->opcode == OP_VTARG_ADDR) {
14032 /* Happens on SPARC/S390 where vtypes are passed by reference */
14033 MonoInst *vtaddr = var->inst_left;
14034 if (vtaddr->opcode == OP_REGVAR) {
14035 ins->opcode = OP_MOVE;
14036 ins->sreg1 = vtaddr->dreg;
14038 else if (var->inst_left->opcode == OP_REGOFFSET) {
14039 ins->opcode = OP_LOAD_MEMBASE;
14040 ins->inst_basereg = vtaddr->inst_basereg;
14041 ins->inst_offset = vtaddr->inst_offset;
14044 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14045 /* gsharedvt arg passed by ref */
14046 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14048 ins->opcode = OP_LOAD_MEMBASE;
14049 ins->inst_basereg = var->inst_basereg;
14050 ins->inst_offset = var->inst_offset;
14051 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14052 MonoInst *load, *load2, *load3;
14053 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14054 int reg1, reg2, reg3;
14055 MonoInst *info_var = cfg->gsharedvt_info_var;
14056 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14060 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14063 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14065 g_assert (info_var);
14066 g_assert (locals_var);
14068 /* Mark the instruction used to compute the locals var as used */
14069 cfg->gsharedvt_locals_var_ins = NULL;
14071 /* Load the offset */
14072 if (info_var->opcode == OP_REGOFFSET) {
14073 reg1 = alloc_ireg (cfg);
14074 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14075 } else if (info_var->opcode == OP_REGVAR) {
14077 reg1 = info_var->dreg;
14079 g_assert_not_reached ();
14081 reg2 = alloc_ireg (cfg);
14082 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14083 /* Load the locals area address */
14084 reg3 = alloc_ireg (cfg);
14085 if (locals_var->opcode == OP_REGOFFSET) {
14086 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14087 } else if (locals_var->opcode == OP_REGVAR) {
14088 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14090 g_assert_not_reached ();
14092 /* Compute the address */
14093 ins->opcode = OP_PADD;
14097 mono_bblock_insert_before_ins (bb, ins, load3);
14098 mono_bblock_insert_before_ins (bb, load3, load2);
14100 mono_bblock_insert_before_ins (bb, load2, load);
14102 g_assert (var->opcode == OP_REGOFFSET);
14104 ins->opcode = OP_ADD_IMM;
14105 ins->sreg1 = var->inst_basereg;
14106 ins->inst_imm = var->inst_offset;
14109 *need_local_opts = TRUE;
14110 spec = INS_INFO (ins->opcode);
14113 if (ins->opcode < MONO_CEE_LAST) {
14114 mono_print_ins (ins);
14115 g_assert_not_reached ();
14119 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14123 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the dreg really holds the stored value;
 * spec2 is the matching rewritten ins-spec. Swapped back below. */
14124 tmp_reg = ins->dreg;
14125 ins->dreg = ins->sreg2;
14126 ins->sreg2 = tmp_reg;
14129 spec2 [MONO_INST_DEST] = ' ';
14130 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14131 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14132 spec2 [MONO_INST_SRC3] = ' ';
14134 } else if (MONO_IS_STORE_MEMINDEX (ins))
14135 g_assert_not_reached ();
14140 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14141 printf ("\t %.3s %d", spec, ins->dreg);
14142 num_sregs = mono_inst_get_src_registers (ins, sregs);
14143 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14144 printf (" %d", sregs [srcindex]);
14151 regtype = spec [MONO_INST_DEST];
14152 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14155 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14156 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14157 MonoInst *store_ins;
14159 MonoInst *def_ins = ins;
14160 int dreg = ins->dreg; /* The original vreg */
14162 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14164 if (var->opcode == OP_REGVAR) {
14165 ins->dreg = var->dreg;
14166 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14168 * Instead of emitting a load+store, use a _membase opcode.
14170 g_assert (var->opcode == OP_REGOFFSET);
14171 if (ins->opcode == OP_MOVE) {
14175 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14176 ins->inst_basereg = var->inst_basereg;
14177 ins->inst_offset = var->inst_offset;
14180 spec = INS_INFO (ins->opcode);
14184 g_assert (var->opcode == OP_REGOFFSET);
14186 prev_dreg = ins->dreg;
14188 /* Invalidate any previous lvreg for this vreg */
14189 vreg_to_lvreg [ins->dreg] = 0;
14193 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14195 store_opcode = OP_STOREI8_MEMBASE_REG;
14198 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14200 #if SIZEOF_REGISTER != 8
14201 if (regtype == 'l') {
14202 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14203 mono_bblock_insert_after_ins (bb, ins, store_ins);
14204 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14205 mono_bblock_insert_after_ins (bb, ins, store_ins);
14206 def_ins = store_ins;
14211 g_assert (store_opcode != OP_STOREV_MEMBASE);
14213 /* Try to fuse the store into the instruction itself */
14214 /* FIXME: Add more instructions */
14215 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14216 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14217 ins->inst_imm = ins->inst_c0;
14218 ins->inst_destbasereg = var->inst_basereg;
14219 ins->inst_offset = var->inst_offset;
14220 spec = INS_INFO (ins->opcode);
14221 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14222 ins->opcode = store_opcode;
14223 ins->inst_destbasereg = var->inst_basereg;
14224 ins->inst_offset = var->inst_offset;
14228 tmp_reg = ins->dreg;
14229 ins->dreg = ins->sreg2;
14230 ins->sreg2 = tmp_reg;
14233 spec2 [MONO_INST_DEST] = ' ';
14234 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14235 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14236 spec2 [MONO_INST_SRC3] = ' ';
14238 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14239 // FIXME: The backends expect the base reg to be in inst_basereg
14240 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14242 ins->inst_basereg = var->inst_basereg;
14243 ins->inst_offset = var->inst_offset;
14244 spec = INS_INFO (ins->opcode);
14246 /* printf ("INS: "); mono_print_ins (ins); */
14247 /* Create a store instruction */
14248 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14250 /* Insert it after the instruction */
14251 mono_bblock_insert_after_ins (bb, ins, store_ins);
14253 def_ins = store_ins;
14256 * We can't assign ins->dreg to var->dreg here, since the
14257 * sregs could use it. So set a flag, and do it after
14260 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14261 dest_has_lvreg = TRUE;
14266 if (def_ins && !live_range_start [dreg]) {
14267 live_range_start [dreg] = def_ins;
14268 live_range_start_bb [dreg] = bb;
14271 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14274 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14275 tmp->inst_c1 = dreg;
14276 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14283 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* FIX: iterate only over the num_sregs entries actually filled in by
 * mono_inst_get_src_registers (); the previous bound of 3 read uninitialized
 * sregs [] slots, which the assert below ('sreg == -1' when regtype is ' ')
 * would trip on. Matches the bound used in the verbose-print loop above. */
14284 for (srcindex = 0; srcindex < num_sregs; ++srcindex) {
14285 regtype = spec [MONO_INST_SRC1 + srcindex];
14286 sreg = sregs [srcindex];
14288 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14289 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14290 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14291 MonoInst *use_ins = ins;
14292 MonoInst *load_ins;
14293 guint32 load_opcode;
14295 if (var->opcode == OP_REGVAR) {
14296 sregs [srcindex] = var->dreg;
14297 //mono_inst_set_src_registers (ins, sregs);
14298 live_range_end [sreg] = use_ins;
14299 live_range_end_bb [sreg] = bb;
14301 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14304 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14305 /* var->dreg is a hreg */
14306 tmp->inst_c1 = sreg;
14307 mono_bblock_insert_after_ins (bb, ins, tmp);
14313 g_assert (var->opcode == OP_REGOFFSET);
14315 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14317 g_assert (load_opcode != OP_LOADV_MEMBASE);
14319 if (vreg_to_lvreg [sreg]) {
14320 g_assert (vreg_to_lvreg [sreg] != -1);
14322 /* The variable is already loaded to an lvreg */
14323 if (G_UNLIKELY (cfg->verbose_level > 2))
14324 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14325 sregs [srcindex] = vreg_to_lvreg [sreg];
14326 //mono_inst_set_src_registers (ins, sregs);
14330 /* Try to fuse the load into the instruction */
14331 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14332 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14333 sregs [0] = var->inst_basereg;
14334 //mono_inst_set_src_registers (ins, sregs);
14335 ins->inst_offset = var->inst_offset;
14336 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14337 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14338 sregs [1] = var->inst_basereg;
14339 //mono_inst_set_src_registers (ins, sregs);
14340 ins->inst_offset = var->inst_offset;
14342 if (MONO_IS_REAL_MOVE (ins)) {
14343 ins->opcode = OP_NOP;
14346 //printf ("%d ", srcindex); mono_print_ins (ins);
14348 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14350 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14351 if (var->dreg == prev_dreg) {
14353 * sreg refers to the value loaded by the load
14354 * emitted below, but we need to use ins->dreg
14355 * since it refers to the store emitted earlier.
14359 g_assert (sreg != -1);
14360 vreg_to_lvreg [var->dreg] = sreg;
14361 g_assert (lvregs_len < 1024);
14362 lvregs [lvregs_len ++] = var->dreg;
14366 sregs [srcindex] = sreg;
14367 //mono_inst_set_src_registers (ins, sregs);
14369 #if SIZEOF_REGISTER != 8
14370 if (regtype == 'l') {
14371 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14372 mono_bblock_insert_before_ins (bb, ins, load_ins);
14373 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14374 mono_bblock_insert_before_ins (bb, ins, load_ins);
14375 use_ins = load_ins;
14380 #if SIZEOF_REGISTER == 4
14381 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14383 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14384 mono_bblock_insert_before_ins (bb, ins, load_ins);
14385 use_ins = load_ins;
14389 if (var->dreg < orig_next_vreg) {
14390 live_range_end [var->dreg] = use_ins;
14391 live_range_end_bb [var->dreg] = bb;
14394 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14397 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14398 tmp->inst_c1 = var->dreg;
14399 mono_bblock_insert_after_ins (bb, ins, tmp);
14403 mono_inst_set_src_registers (ins, sregs);
14405 if (dest_has_lvreg) {
14406 g_assert (ins->dreg != -1);
14407 vreg_to_lvreg [prev_dreg] = ins->dreg;
14408 g_assert (lvregs_len < 1024);
14409 lvregs [lvregs_len ++] = prev_dreg;
14410 dest_has_lvreg = FALSE;
14414 tmp_reg = ins->dreg;
14415 ins->dreg = ins->sreg2;
14416 ins->sreg2 = tmp_reg;
14419 if (MONO_IS_CALL (ins)) {
/* A call clobbers all lvregs, so forget the cached load results. */
14420 /* Clear vreg_to_lvreg array */
14421 for (i = 0; i < lvregs_len; i++)
14422 vreg_to_lvreg [lvregs [i]] = 0;
14424 } else if (ins->opcode == OP_NOP) {
14426 MONO_INST_NULLIFY_SREGS (ins);
14429 if (cfg->verbose_level > 2)
14430 mono_print_ins_index (1, ins);
14433 /* Extend the live range based on the liveness info */
14434 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14435 for (i = 0; i < cfg->num_varinfo; i ++) {
14436 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14438 if (vreg_is_volatile (cfg, vi->vreg))
14439 /* The liveness info is incomplete */
14442 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14443 /* Live from at least the first ins of this bb */
14444 live_range_start [vi->vreg] = bb->code;
14445 live_range_start_bb [vi->vreg] = bb;
14448 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14449 /* Live at least until the last ins of this bb */
14450 live_range_end [vi->vreg] = bb->last_ins;
14451 live_range_end_bb [vi->vreg] = bb;
14457 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14459 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14460 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14462 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14463 for (i = 0; i < cfg->num_varinfo; ++i) {
14464 int vreg = MONO_VARINFO (cfg, i)->vreg;
14467 if (live_range_start [vreg]) {
14468 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14470 ins->inst_c1 = vreg;
14471 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14473 if (live_range_end [vreg]) {
14474 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14476 ins->inst_c1 = vreg;
14477 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14478 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14480 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14486 if (cfg->gsharedvt_locals_var_ins) {
14487 /* Nullify if unused */
14488 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14489 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14492 g_free (live_range_start);
14493 g_free (live_range_end);
14494 g_free (live_range_start_bb);
14495 g_free (live_range_end_bb);
14500 * - use 'iadd' instead of 'int_add'
14501 * - handling ovf opcodes: decompose in method_to_ir.
14502 * - unify iregs/fregs
14503 * -> partly done, the missing parts are:
14504 * - a more complete unification would involve unifying the hregs as well, so
14505 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14506 * would no longer map to the machine hregs, so the code generators would need to
14507 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14508 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14509 * fp/non-fp branches speeds it up by about 15%.
14510 * - use sext/zext opcodes instead of shifts
14512 * - get rid of TEMPLOADs if possible and use vregs instead
14513 * - clean up usage of OP_P/OP_ opcodes
14514 * - cleanup usage of DUMMY_USE
14515 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14517 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14518 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14519 * - make sure handle_stack_args () is called before the branch is emitted
14520 * - when the new IR is done, get rid of all unused stuff
14521 * - COMPARE/BEQ as separate instructions or unify them ?
14522 * - keeping them separate allows specialized compare instructions like
14523 * compare_imm, compare_membase
14524 * - most back ends unify fp compare+branch, fp compare+ceq
14525 * - integrate mono_save_args into inline_method
14526 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14527 * - handle long shift opts on 32 bit platforms somehow: they require
14528 * 3 sregs (2 for arg1 and 1 for arg2)
14529 * - make byref a 'normal' type.
14530 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14531 * variable if needed.
14532 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14533 * like inline_method.
14534 * - remove inlining restrictions
14535 * - fix LNEG and enable cfold of INEG
14536 * - generalize x86 optimizations like ldelema as a peephole optimization
14537 * - add store_mem_imm for amd64
14538 * - optimize the loading of the interruption flag in the managed->native wrappers
14539 * - avoid special handling of OP_NOP in passes
14540 * - move code inserting instructions into one function/macro.
14541 * - try a coalescing phase after liveness analysis
14542 * - add float -> vreg conversion + local optimizations on !x86
14543 * - figure out how to handle decomposed branches during optimizations, ie.
14544 * compare+branch, op_jump_table+op_br etc.
14545 * - promote RuntimeXHandles to vregs
14546 * - vtype cleanups:
14547 * - add a NEW_VARLOADA_VREG macro
14548 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14549 * accessing vtype fields.
14550 * - get rid of I8CONST on 64 bit platforms
14551 * - dealing with the increase in code size due to branches created during opcode
14553 * - use extended basic blocks
14554 * - all parts of the JIT
14555 * - handle_global_vregs () && local regalloc
14556 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14557 * - sources of increase in code size:
14560 * - isinst and castclass
14561 * - lvregs not allocated to global registers even if used multiple times
14562 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14564 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14565 * - add all micro optimizations from the old JIT
14566 * - put tree optimizations into the deadce pass
14567 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14568 * specific function.
14569 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14570 * fcompare + branchCC.
14571 * - create a helper function for allocating a stack slot, taking into account
14572 * MONO_CFG_HAS_SPILLUP.
14574 * - merge the ia64 switch changes.
14575 * - optimize mono_regstate2_alloc_int/float.
14576 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14577 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14578 * parts of the tree could be separated by other instructions, killing the tree
14579 * arguments, or stores killing loads etc. Also, should we fold loads into other
14580 * instructions if the result of the load is used multiple times ?
14581 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14582 * - LAST MERGE: 108395.
14583 * - when returning vtypes in registers, generate IR and append it to the end of the
14584 * last bb instead of doing it in the epilog.
14585 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14593 - When to decompose opcodes:
14594 - earlier: this makes some optimizations hard to implement, since the low level IR
14595 no longer contains the necessary information. But it is easier to do.
14596 - later: harder to implement, enables more optimizations.
14597 - Branches inside bblocks:
14598 - created when decomposing complex opcodes.
14599 - branches to another bblock: harmless, but not tracked by the branch
14600 optimizations, so need to branch to a label at the start of the bblock.
14601 - branches to inside the same bblock: very problematic, trips up the local
14602 reg allocator. Can be fixed by splitting the current bblock, but that is a
14603 complex operation, since some local vregs can become global vregs etc.
14604 - Local/global vregs:
14605 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14606 local register allocator.
14607 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14608 structure, created by mono_create_var (). Assigned to hregs or the stack by
14609 the global register allocator.
14610 - When to do optimizations like alu->alu_imm:
14611 - earlier -> saves work later on since the IR will be smaller/simpler
14612 - later -> can work on more instructions
14613 - Handling of valuetypes:
14614 - When a vtype is pushed on the stack, a new temporary is created, an
14615 instruction computing its address (LDADDR) is emitted and pushed on
14616 the stack. Need to optimize cases when the vtype is used immediately as in
14617 argument passing, stloc etc.
14618 - Instead of the to_end stuff in the old JIT, simply call the function handling
14619 the values on the stack before emitting the last instruction of the bb.
14622 #endif /* DISABLE_JIT */