/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
/* These have 'cfg' as an implicit argument */
/* Record an inline failure and bail out of the current method's IR generation.
 * Only treated as fatal when we are actually inlining (cfg->method !=
 * cfg->current_method) and the inlinee is not a wrapper. */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
	} \
	} while (0)
/* Bail out if a previous step already recorded a compile-time exception. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE) \
			goto exception_exit; \
	} while (0)
/* Record a MethodAccessException for CMETHOD referenced from METHOD and bail. */
#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
		method_access_failure ((cfg), (method), (cmethod)); \
		goto exception_exit; \
	} while (0)
/* Record a FieldAccessException for FIELD referenced from METHOD and bail. */
#define FIELD_ACCESS_FAILURE(method, field) do { \
		field_access_failure ((cfg), (method), (field)); \
		goto exception_exit; \
	} while (0)
/* Abort generic sharing for OPCODE; only meaningful when compiling a shared
 * method (assumes the generic-sharing guard line was present in the original —
 * parallel to GSHAREDVT_FAILURE below). */
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->generic_sharing_context) { \
			gshared_failure (cfg, opcode, __FILE__, __LINE__); \
			goto exception_exit; \
		} \
	} while (0)
/* Abort gsharedvt compilation for OPCODE, falling back to instantiation. */
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
	} while (0)
/* Record an OutOfMemoryException on the cfg and bail. */
#define OUT_OF_MEMORY_FAILURE do {	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY);		\
		goto exception_exit;	\
	} while (0)
/* Mark the method as not AOT-compilable, logging the call site when verbose. */
#define DISABLE_AOT(cfg) do { \
		if ((cfg)->verbose_level >= 2)					  \
			printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__);	\
		(cfg)->disable_aot = TRUE;							\
	} while (0)
/* Record a TypeLoadException and bail; breaks into the debugger first if the
 * break_on_unverified debug option is set. */
#define LOAD_ERROR do { \
		break_on_unverified ();								\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
		goto exception_exit;									\
	} while (0)
/* Record a TypeLoadException for KLASS and bail (assumes the dropped line
 * invoked LOAD_ERROR — TODO confirm against upstream). */
#define TYPE_LOAD_ERROR(klass) do { \
		cfg->exception_ptr = klass; \
		LOAD_ERROR;					\
	} while (0)
/* Bail out through the MonoError path if cfg->error holds a failure. */
#define CHECK_CFG_ERROR do {\
		if (!mono_error_ok (&cfg->error)) { \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
			goto mono_error_exit; \
		} \
	} while (0)
134 /* Determine whenever 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
/* Initialize a MonoMethodVar: no uses yet, no hard register, index ID.
 * (The reg/idx assignments were on dropped lines — reconstructed; confirm
 * against upstream.) */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_get_underlying_type (cfg, type);
281 switch (type->type) {
294 case MONO_TYPE_FNPTR:
296 case MONO_TYPE_CLASS:
297 case MONO_TYPE_STRING:
298 case MONO_TYPE_OBJECT:
299 case MONO_TYPE_SZARRAY:
300 case MONO_TYPE_ARRAY:
304 #if SIZEOF_REGISTER == 8
310 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
313 case MONO_TYPE_VALUETYPE:
314 if (type->data.klass->enumtype) {
315 type = mono_class_enum_basetype (type->data.klass);
318 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
321 case MONO_TYPE_TYPEDBYREF:
323 case MONO_TYPE_GENERICINST:
324 type = &type->data.generic_class->container_class->byval_arg;
328 g_assert (cfg->generic_sharing_context);
329 if (mini_type_var_is_vt (cfg, type))
332 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
334 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
340 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 printf ("\n%s %d: [IN: ", msg, bb->block_num);
346 for (i = 0; i < bb->in_count; ++i)
347 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
349 for (i = 0; i < bb->out_count; ++i)
350 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
352 for (tree = bb->code; tree; tree = tree->next)
353 mono_print_ins_index (-1, tree);
357 mono_create_helper_signatures (void)
359 helper_sig_domain_get = mono_create_icall_signature ("ptr");
360 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
361 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
362 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
363 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
364 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
365 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
366 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
369 static MONO_NEVER_INLINE void
370 break_on_unverified (void)
372 if (mini_get_debug_options ()->break_on_unverified)
376 static MONO_NEVER_INLINE void
377 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
382 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
383 g_free (method_fname);
384 g_free (cil_method_fname);
387 static MONO_NEVER_INLINE void
388 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
390 char *method_fname = mono_method_full_name (method, TRUE);
391 char *field_fname = mono_field_full_name (field);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
393 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
394 g_free (method_fname);
395 g_free (field_fname);
398 static MONO_NEVER_INLINE void
399 inline_failure (MonoCompile *cfg, const char *msg)
401 if (cfg->verbose_level >= 2)
402 printf ("inline failed: %s\n", msg);
403 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
406 static MONO_NEVER_INLINE void
407 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
409 if (cfg->verbose_level > 2) \
410 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
411 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 static MONO_NEVER_INLINE void
415 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
417 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
418 if (cfg->verbose_level >= 2)
419 printf ("%s\n", cfg->exception_message);
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 *
 * For gsharedvt methods, fall back to instantiation on verification failure;
 * otherwise jump to the method-wide 'unverified' handler. (The final
 * `goto unverified;` line was dropped by extraction — reconstructed.)
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2)									\
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit;											\
	}																	\
	break_on_unverified ();												\
	goto unverified;													\
} while (0)
/* Look up (or lazily create and register) the basic block starting at IP.
 * (The `if (!(tblock))` guard line was dropped by extraction — reconstructed;
 * without it every lookup would allocate a new block.) */
#define GET_BBLOCK(cfg,tblock,ip) do {	\
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) {	\
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock));		\
			(tblock)->cil_code = (ip);	\
			ADD_BBLOCK (cfg, (tblock));	\
		} \
	} while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm. The result may be a
 * managed pointer, hence the alloc_ireg_mp destination. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
460 /* Emit conversions so both operands of a binary opcode are of the same type */
462 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
464 MonoInst *arg1 = *arg1_ref;
465 MonoInst *arg2 = *arg2_ref;
468 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
469 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
472 /* Mixing r4/r8 is allowed by the spec */
473 if (arg1->type == STACK_R4) {
474 int dreg = alloc_freg (cfg);
476 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
477 conv->type = STACK_R8;
481 if (arg2->type == STACK_R4) {
482 int dreg = alloc_freg (cfg);
484 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
485 conv->type = STACK_R8;
491 #if SIZEOF_REGISTER == 8
492 /* FIXME: Need to add many more cases */
493 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
496 int dr = alloc_preg (cfg);
497 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
498 (ins)->sreg2 = widen->dreg;
/* Pop two values, emit the type-specialized binary OP, push the result.
 * (The dropped `sp -= 2;` and `CHECK_TYPE (ins);` lines reconstructed.) */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (cfg, ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */		 \
		add_widen_op (cfg, ins, &sp [0], &sp [1]);		 \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins), &bblock);	\
	} while (0)
/* Pop one value, emit the type-specialized unary OP, push the result.
 * (The dropped `sp--;` and `CHECK_TYPE (ins);` lines reconstructed.) */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (cfg, ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type);	\
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins, &bblock);	\
	} while (0)
/* Emit a compare + conditional branch: pop two operands, link the true edge
 * to `target` and the false edge to NEXT_BLOCK (or the block at `ip`).
 * Dropped lines (`MonoInst *cmp;`, `sp -= 2;`, `CHECK_TYPE`, `else {`, closing
 * braces) reconstructed — confirm against upstream. */
#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cfg, cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		add_widen_op (cfg, cmp, &sp [0], &sp [1]);						\
		type_from_op (cfg, ins, sp [0], sp [1]);							\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {									\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
	} while (0)
561 * link_bblock: Links two basic blocks
563 * links two basic blocks in the control flow graph, the 'from'
564 * argument is the starting block and the 'to' argument is the block
565 * the control flow ends to after 'from'.
568 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
570 MonoBasicBlock **newa;
574 if (from->cil_code) {
576 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
578 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
581 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
583 printf ("edge from entry to exit\n");
588 for (i = 0; i < from->out_count; ++i) {
589 if (to == from->out_bb [i]) {
595 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
596 for (i = 0; i < from->out_count; ++i) {
597 newa [i] = from->out_bb [i];
605 for (i = 0; i < to->in_count; ++i) {
606 if (from == to->in_bb [i]) {
612 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
613 for (i = 0; i < to->in_count; ++i) {
614 newa [i] = to->in_bb [i];
623 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
625 link_bblock (cfg, from, to);
629 * mono_find_block_region:
631 * We mark each basic block with a region ID. We use that to avoid BB
632 * optimizations when blocks are in different regions.
635 * A region token that encodes where this region is, and information
636 * about the clause owner for this block.
638 * The region encodes the try/catch/filter clause that owns this block
639 * as well as the type. -1 is a special value that represents a block
640 * that is in none of try/catch/filter.
643 mono_find_block_region (MonoCompile *cfg, int offset)
645 MonoMethodHeader *header = cfg->header;
646 MonoExceptionClause *clause;
649 for (i = 0; i < header->num_clauses; ++i) {
650 clause = &header->clauses [i];
651 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
652 (offset < (clause->handler_offset)))
653 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
655 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
656 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
657 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
658 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
659 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
661 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
664 for (i = 0; i < header->num_clauses; ++i) {
665 clause = &header->clauses [i];
667 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
668 return ((i + 1) << 8) | clause->flags;
675 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
677 MonoMethodHeader *header = cfg->header;
678 MonoExceptionClause *clause;
682 for (i = 0; i < header->num_clauses; ++i) {
683 clause = &header->clauses [i];
684 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
685 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
686 if (clause->flags == type)
687 res = g_list_append (res, clause);
694 mono_create_spvar_for_region (MonoCompile *cfg, int region)
698 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
702 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
703 /* prevent it from being register allocated */
704 var->flags |= MONO_INST_VOLATILE;
706 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
710 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
712 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
716 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
720 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
724 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
725 /* prevent it from being register allocated */
726 var->flags |= MONO_INST_VOLATILE;
728 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
734 * Returns the type used in the eval stack when @type is loaded.
735 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
738 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
742 type = mini_get_underlying_type (cfg, type);
743 inst->klass = klass = mono_class_from_mono_type (type);
745 inst->type = STACK_MP;
750 switch (type->type) {
752 inst->type = STACK_INV;
760 inst->type = STACK_I4;
765 case MONO_TYPE_FNPTR:
766 inst->type = STACK_PTR;
768 case MONO_TYPE_CLASS:
769 case MONO_TYPE_STRING:
770 case MONO_TYPE_OBJECT:
771 case MONO_TYPE_SZARRAY:
772 case MONO_TYPE_ARRAY:
773 inst->type = STACK_OBJ;
777 inst->type = STACK_I8;
780 inst->type = cfg->r4_stack_type;
783 inst->type = STACK_R8;
785 case MONO_TYPE_VALUETYPE:
786 if (type->data.klass->enumtype) {
787 type = mono_class_enum_basetype (type->data.klass);
791 inst->type = STACK_VTYPE;
794 case MONO_TYPE_TYPEDBYREF:
795 inst->klass = mono_defaults.typed_reference_class;
796 inst->type = STACK_VTYPE;
798 case MONO_TYPE_GENERICINST:
799 type = &type->data.generic_class->container_class->byval_arg;
803 g_assert (cfg->generic_sharing_context);
804 if (mini_is_gsharedvt_type (cfg, type)) {
805 g_assert (cfg->gsharedvt);
806 inst->type = STACK_VTYPE;
808 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
812 g_error ("unknown type 0x%02x in eval stack type", type->type);
817 * The following tables are used to quickly validate the IL code in type_from_op ().
820 bin_num_table [STACK_MAX] [STACK_MAX] = {
821 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
826 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
834 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
837 /* reduce the size of this table */
839 bin_int_table [STACK_MAX] [STACK_MAX] = {
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
851 bin_comp_table [STACK_MAX] [STACK_MAX] = {
852 /* Inv i L p F & O vt r4 */
854 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
855 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
856 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
857 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
858 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
859 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
860 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
861 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
864 /* reduce the size of this table */
866 shift_table [STACK_MAX] [STACK_MAX] = {
867 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
878 * Tables to map from the non-specific opcode to the matching
879 * type-specific opcode.
881 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
883 binops_op_map [STACK_MAX] = {
884 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
887 /* handles from CEE_NEG to CEE_CONV_U8 */
889 unops_op_map [STACK_MAX] = {
890 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
893 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
895 ovfops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
899 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
901 ovf2ops_op_map [STACK_MAX] = {
902 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
905 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
907 ovf3ops_op_map [STACK_MAX] = {
908 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
911 /* handles from CEE_BEQ to CEE_BLT_UN */
913 beqops_op_map [STACK_MAX] = {
914 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
917 /* handles from CEE_CEQ to CEE_CLT_UN */
919 ceqops_op_map [STACK_MAX] = {
920 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
924 * Sets ins->type (the type on the eval stack) according to the
925 * type of the opcode and the arguments to it.
926 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
928 * FIXME: this function sets ins->type unconditionally in some cases, but
929 * it should set it to invalid for some types (a conv.x on an object)
932 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
934 switch (ins->opcode) {
941 /* FIXME: check unverifiable args for STACK_MP */
942 ins->type = bin_num_table [src1->type] [src2->type];
943 ins->opcode += binops_op_map [ins->type];
950 ins->type = bin_int_table [src1->type] [src2->type];
951 ins->opcode += binops_op_map [ins->type];
956 ins->type = shift_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
962 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
963 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
964 ins->opcode = OP_LCOMPARE;
965 else if (src1->type == STACK_R4)
966 ins->opcode = OP_RCOMPARE;
967 else if (src1->type == STACK_R8)
968 ins->opcode = OP_FCOMPARE;
970 ins->opcode = OP_ICOMPARE;
972 case OP_ICOMPARE_IMM:
973 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE_IMM;
987 ins->opcode += beqops_op_map [src1->type];
990 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
991 ins->opcode += ceqops_op_map [src1->type];
997 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
998 ins->opcode += ceqops_op_map [src1->type];
1002 ins->type = neg_table [src1->type];
1003 ins->opcode += unops_op_map [ins->type];
1006 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1007 ins->type = src1->type;
1009 ins->type = STACK_INV;
1010 ins->opcode += unops_op_map [ins->type];
1016 ins->type = STACK_I4;
1017 ins->opcode += unops_op_map [src1->type];
1020 ins->type = STACK_R8;
1021 switch (src1->type) {
1024 ins->opcode = OP_ICONV_TO_R_UN;
1027 ins->opcode = OP_LCONV_TO_R_UN;
1031 case CEE_CONV_OVF_I1:
1032 case CEE_CONV_OVF_U1:
1033 case CEE_CONV_OVF_I2:
1034 case CEE_CONV_OVF_U2:
1035 case CEE_CONV_OVF_I4:
1036 case CEE_CONV_OVF_U4:
1037 ins->type = STACK_I4;
1038 ins->opcode += ovf3ops_op_map [src1->type];
1040 case CEE_CONV_OVF_I_UN:
1041 case CEE_CONV_OVF_U_UN:
1042 ins->type = STACK_PTR;
1043 ins->opcode += ovf2ops_op_map [src1->type];
1045 case CEE_CONV_OVF_I1_UN:
1046 case CEE_CONV_OVF_I2_UN:
1047 case CEE_CONV_OVF_I4_UN:
1048 case CEE_CONV_OVF_U1_UN:
1049 case CEE_CONV_OVF_U2_UN:
1050 case CEE_CONV_OVF_U4_UN:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf2ops_op_map [src1->type];
1055 ins->type = STACK_PTR;
1056 switch (src1->type) {
1058 ins->opcode = OP_ICONV_TO_U;
1062 #if SIZEOF_VOID_P == 8
1063 ins->opcode = OP_LCONV_TO_U;
1065 ins->opcode = OP_MOVE;
1069 ins->opcode = OP_LCONV_TO_U;
1072 ins->opcode = OP_FCONV_TO_U;
1078 ins->type = STACK_I8;
1079 ins->opcode += unops_op_map [src1->type];
1081 case CEE_CONV_OVF_I8:
1082 case CEE_CONV_OVF_U8:
1083 ins->type = STACK_I8;
1084 ins->opcode += ovf3ops_op_map [src1->type];
1086 case CEE_CONV_OVF_U8_UN:
1087 case CEE_CONV_OVF_I8_UN:
1088 ins->type = STACK_I8;
1089 ins->opcode += ovf2ops_op_map [src1->type];
1092 ins->type = cfg->r4_stack_type;
1093 ins->opcode += unops_op_map [src1->type];
1096 ins->type = STACK_R8;
1097 ins->opcode += unops_op_map [src1->type];
1100 ins->type = STACK_R8;
1104 ins->type = STACK_I4;
1105 ins->opcode += ovfops_op_map [src1->type];
1108 case CEE_CONV_OVF_I:
1109 case CEE_CONV_OVF_U:
1110 ins->type = STACK_PTR;
1111 ins->opcode += ovfops_op_map [src1->type];
1114 case CEE_ADD_OVF_UN:
1116 case CEE_MUL_OVF_UN:
1118 case CEE_SUB_OVF_UN:
1119 ins->type = bin_num_table [src1->type] [src2->type];
1120 ins->opcode += ovfops_op_map [src1->type];
1121 if (ins->type == STACK_R8)
1122 ins->type = STACK_INV;
1124 case OP_LOAD_MEMBASE:
1125 ins->type = STACK_PTR;
1127 case OP_LOADI1_MEMBASE:
1128 case OP_LOADU1_MEMBASE:
1129 case OP_LOADI2_MEMBASE:
1130 case OP_LOADU2_MEMBASE:
1131 case OP_LOADI4_MEMBASE:
1132 case OP_LOADU4_MEMBASE:
1133 ins->type = STACK_PTR;
1135 case OP_LOADI8_MEMBASE:
1136 ins->type = STACK_I8;
1138 case OP_LOADR4_MEMBASE:
1139 ins->type = cfg->r4_stack_type;
1141 case OP_LOADR8_MEMBASE:
1142 ins->type = STACK_R8;
1145 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1149 if (ins->type == STACK_MP)
1150 ins->klass = mono_defaults.object_class;
1155 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1161 param_table [STACK_MAX] [STACK_MAX] = {
1166 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1170 switch (args->type) {
1180 for (i = 0; i < sig->param_count; ++i) {
1181 switch (args [i].type) {
1185 if (!sig->params [i]->byref)
1189 if (sig->params [i]->byref)
1191 switch (sig->params [i]->type) {
1192 case MONO_TYPE_CLASS:
1193 case MONO_TYPE_STRING:
1194 case MONO_TYPE_OBJECT:
1195 case MONO_TYPE_SZARRAY:
1196 case MONO_TYPE_ARRAY:
1203 if (sig->params [i]->byref)
1205 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1214 /*if (!param_table [args [i].type] [sig->params [i]->type])
1222 * When we need a pointer to the current domain many times in a method, we
1223 * call mono_domain_get() once and we store the result in a local variable.
1224 * This function returns the variable that represents the MonoDomain*.
1226 inline static MonoInst *
1227 mono_get_domainvar (MonoCompile *cfg)
1229 if (!cfg->domainvar)
1230 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1231 return cfg->domainvar;
1235 * The got_var contains the address of the Global Offset Table when AOT
1239 mono_get_got_var (MonoCompile *cfg)
1241 #ifdef MONO_ARCH_NEED_GOT_VAR
1242 if (!cfg->compile_aot)
1244 if (!cfg->got_var) {
1245 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 return cfg->got_var;
1254 mono_get_vtable_var (MonoCompile *cfg)
1256 g_assert (cfg->generic_sharing_context);
1258 if (!cfg->rgctx_var) {
1259 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1260 /* force the var to be stack allocated */
1261 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1264 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType*.
 * Aborts on stack types with no defined mapping.
 */
1268 type_from_stack_type (MonoInst *ins) {
1269 switch (ins->type) {
1270 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1271 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1272 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1273 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1274 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Case label elided in this extract; this_arg is the byref "this" type of ins->klass. */
1276 return &ins->klass->this_arg;
1277 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1278 case STACK_VTYPE: return &ins->klass->byval_arg;
1280 g_error ("stack type %d to monotype not handled\n", ins->type);
1285 static G_GNUC_UNUSED int
1286 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1288 t = mono_type_get_underlying_type (t);
1300 case MONO_TYPE_FNPTR:
1302 case MONO_TYPE_CLASS:
1303 case MONO_TYPE_STRING:
1304 case MONO_TYPE_OBJECT:
1305 case MONO_TYPE_SZARRAY:
1306 case MONO_TYPE_ARRAY:
1312 return cfg->r4_stack_type;
1315 case MONO_TYPE_VALUETYPE:
1316 case MONO_TYPE_TYPEDBYREF:
1318 case MONO_TYPE_GENERICINST:
1319 if (mono_type_generic_inst_is_valuetype (t))
1325 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element class implied by a CIL ldelem/stelem opcode.
 * NOTE(review): the case labels for most opcodes are elided in this extract;
 * only the returned classes (and the LDELEM_REF/STELEM_REF labels) are visible.
 */
1332 array_access_to_klass (int opcode)
1336 return mono_defaults.byte_class;
1338 return mono_defaults.uint16_class;
1341 return mono_defaults.int_class;
1344 return mono_defaults.sbyte_class;
1347 return mono_defaults.int16_class;
1350 return mono_defaults.int32_class;
1352 return mono_defaults.uint32_class;
1355 return mono_defaults.int64_class;
1358 return mono_defaults.single_class;
1361 return mono_defaults.double_class;
1362 case CEE_LDELEM_REF:
1363 case CEE_STELEM_REF:
1364 return mono_defaults.object_class;
1366 g_assert_not_reached ();
1372 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable for stack slot SLOT holding a value of INS's stack
 * type, reusing a previously created variable (cached in cfg->intvars) when
 * the slot and type match.
 */
1375 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1380 /* inlining can result in deeper stacks */
1381 if (slot >= cfg->header->max_stack)
1382 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* One cache entry per (stack type, slot) pair. */
1384 pos = ins->type - 1 + slot * STACK_MAX;
1386 switch (ins->type) {
1393 if ((vnum = cfg->intvars [pos]))
1394 return cfg->varinfo [vnum];
1395 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1396 cfg->intvars [pos] = res->inst_c0;
/* Fallthrough path (cases elided in this extract): uncached variable. */
1399 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, record the image+token pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve KEY
 * back to a metadata token.
 */
1405 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1408 * Don't use this if a generic_context is set, since that means AOT can't
1409 * look up the method using just the image+token.
1410 * table == 0 means this is a reference made from a wrapper.
1412 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1413 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1414 jump_info_token->image = image;
1415 jump_info_token->token = token;
1416 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1421 * This function is called to handle items that are left on the evaluation stack
1422 * at basic block boundaries. What happens is that we save the values to local variables
1423 * and we reload them later when first entering the target basic block (with the
1424 * handle_loaded_temps () function).
1425 * A single join point will use the same variables (stored in the array bb->out_stack or
1426 * bb->in_stack, if the basic block is before or after the join point).
1428 * This function needs to be called _before_ emitting the last instruction of
1429 * the bb (i.e. before emitting a branch).
1430 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spill the COUNT values in SP to local variables at a basic-block boundary
 * so the successor blocks can reload them (see the comment block above).
 * Sets cfg->unverifiable if the out-stack depth disagrees with a successor's
 * in-stack depth.
 */
1433 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1436 MonoBasicBlock *bb = cfg->cbb;
1437 MonoBasicBlock *outb;
1438 MonoInst *inst, **locals;
1443 if (cfg->verbose_level > 3)
1444 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bblock: pick/create the spill variables. */
1445 if (!bb->out_scount) {
1446 bb->out_scount = count;
1447 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing the in_stack of a successor, if one already has it. */
1449 for (i = 0; i < bb->out_count; ++i) {
1450 outb = bb->out_bb [i];
1451 /* exception handlers are linked, but they should not be considered for stack args */
1452 if (outb->flags & BB_EXCEPTION_HANDLER)
1454 //printf (" %d", outb->block_num);
1455 if (outb->in_stack) {
1457 bb->out_stack = outb->in_stack;
1463 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1464 for (i = 0; i < count; ++i) {
1466 * try to reuse temps already allocated for this purpose, if they occupy the same
1467 * stack slot and if they are of the same type.
1468 * This won't cause conflicts since if 'local' is used to
1469 * store one of the values in the in_stack of a bblock, then
1470 * the same variable will be used for the same outgoing stack
1472 * This doesn't work when inlining methods, since the bblocks
1473 * in the inlined methods do not inherit their in_stack from
1474 * the bblock they are inlined to. See bug #58863 for an
1477 if (cfg->inlined_method)
1478 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1480 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to the successors' in_stack. */
1485 for (i = 0; i < bb->out_count; ++i) {
1486 outb = bb->out_bb [i];
1487 /* exception handlers are linked, but they should not be considered for stack args */
1488 if (outb->flags & BB_EXCEPTION_HANDLER)
1490 if (outb->in_scount) {
1491 if (outb->in_scount != bb->out_scount) {
1492 cfg->unverifiable = TRUE;
1495 continue; /* check they are the same locals */
1497 outb->in_scount = count;
1498 outb->in_stack = bb->out_stack;
1501 locals = bb->out_stack;
/* Emit the actual stores and replace the stack entries with the locals. */
1503 for (i = 0; i < count; ++i) {
1504 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1505 inst->cil_code = sp [i]->cil_code;
1506 sp [i] = locals [i];
1507 if (cfg->verbose_level > 3)
1508 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1512 * It is possible that the out bblocks already have in_stack assigned, and
1513 * the in_stacks differ. In this case, we will store to all the different
1520 /* Find a bblock which has a different in_stack */
1522 while (bindex < bb->out_count) {
1523 outb = bb->out_bb [bindex];
1524 /* exception handlers are linked, but they should not be considered for stack args */
1525 if (outb->flags & BB_EXCEPTION_HANDLER) {
1529 if (outb->in_stack != locals) {
1530 for (i = 0; i < count; ++i) {
1531 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1532 inst->cil_code = sp [i]->cil_code;
1533 sp [i] = locals [i];
1534 if (cfg->verbose_level > 3)
1535 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1537 locals = outb->in_stack;
1546 /* Emit code which loads interface_offsets [klass->interface_id]
1547 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR loading interface_offsets [klass->interface_id] into INTF_REG
 * (the offsets array is stored in memory immediately before the vtable).
 * AOT uses a patched ADJUSTED_IID constant; JIT uses a compile-time offset.
 */
1550 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1552 if (cfg->compile_aot) {
1553 int ioffset_reg = alloc_preg (cfg);
1554 int iid_reg = alloc_preg (cfg);
1556 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: the interface id is known, so index before the vtable directly. */
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR setting INTF_BIT_REG to nonzero iff the interface bitmap found at
 * BASE_REG + OFFSET has the bit for KLASS's interface id set.
 * With COMPRESSED_INTERFACE_BITMAP the test is delegated to the
 * mono_class_interface_match icall; otherwise the bit is tested inline.
 */
1566 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1568 int ibitmap_reg = alloc_preg (cfg);
1569 #ifdef COMPRESSED_INTERFACE_BITMAP
1571 MonoInst *res, *ins;
1572 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1573 MONO_ADD_INS (cfg->cbb, ins);
1575 if (cfg->compile_aot)
1576 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1578 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1579 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1580 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1582 int ibitmap_byte_reg = alloc_preg (cfg);
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1586 if (cfg->compile_aot) {
/* AOT: the iid is a patch constant, so compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime. */
1587 int iid_reg = alloc_preg (cfg);
1588 int shifted_iid_reg = alloc_preg (cfg);
1589 int ibitmap_byte_address_reg = alloc_preg (cfg);
1590 int masked_iid_reg = alloc_preg (cfg);
1591 int iid_one_bit_reg = alloc_preg (cfg);
1592 int iid_bit_reg = alloc_preg (cfg);
1593 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1595 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1598 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1600 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is known at compile time, fold index and mask into immediates. */
1602 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1609 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1610 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against the interface bitmap stored in a MonoClass. */
1613 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1615 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1619 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1620 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against the interface bitmap stored in a MonoVTable. */
1623 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1625 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1629 * Emit code which checks whether the interface id of @klass is smaller than
1630 * the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Emit IR comparing MAX_IID_REG against KLASS's interface id.
 * If a FALSE_TARGET is given, branch there on failure; otherwise throw
 * InvalidCastException.
 */
1633 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1634 MonoBasicBlock *false_target)
1636 if (cfg->compile_aot) {
1637 int iid_reg = alloc_preg (cfg);
1638 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1646 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1649 /* Same as above, but obtains max_iid from a vtable */
/* Max-iid check loading max_interface_id from a vtable in VTABLE_REG. */
1651 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1652 MonoBasicBlock *false_target)
1654 int max_iid_reg = alloc_preg (cfg);
1656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1657 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1660 /* Same as above, but obtains max_iid from a klass */
/* Max-iid check loading max_interface_id from a MonoClass in KLASS_REG. */
1662 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1663 MonoBasicBlock *false_target)
1665 int max_iid_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1668 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style check: compare the supertype of the class in
 * KLASS_REG (at KLASS's inheritance depth) against KLASS, branching to
 * TRUE_TARGET on match and FALSE_TARGET when the depth test fails.
 * KLASS may be given as a runtime MonoInst (KLASS_INS), an AOT constant,
 * or an immediate.
 * NOTE(review): some lines are elided in this extract (numbering gaps).
 */
1672 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int idepth_reg = alloc_preg (cfg);
1675 int stypes_reg = alloc_preg (cfg);
1676 int stype = alloc_preg (cfg);
1678 mono_class_setup_supertypes (klass);
/* Only classes deeper than the default supertable need an explicit depth check. */
1680 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it with KLASS. */
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1688 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1689 } else if (cfg->compile_aot) {
1690 int const_reg = alloc_preg (cfg);
1691 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with KLASS known at compile time. */
1700 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1702 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit IR checking that the vtable in VTABLE_REG implements interface KLASS.
 * With branch targets, jump accordingly; otherwise throw InvalidCastException.
 */
1706 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1708 int intf_reg = alloc_preg (cfg);
1710 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1711 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1716 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1720 * Variant of the above that takes a register to the class, not the vtable.
/* Same as mini_emit_iface_cast, but KLASS_REG holds a MonoClass*, not a vtable. */
1723 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1725 int intf_bit_reg = alloc_preg (cfg);
1727 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1728 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1731 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1733 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit IR throwing InvalidCastException unless the class in KLASS_REG equals
 * KLASS (given as a runtime inst, an AOT constant, or an immediate).
 */
1737 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1740 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 } else if (cfg->compile_aot) {
1742 int const_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1744 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1748 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with KLASS known at compile time. */
1752 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1754 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare the class in KLASS_REG with KLASS and branch to TARGET using
 * BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN).
 */
1758 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1760 if (cfg->compile_aot) {
1761 int const_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1763 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1771 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check for the object in OBJ_REG whose class is in
 * KLASS_REG, throwing InvalidCastException on failure. Handles the array
 * case (rank + element class checks, including the enum/object special
 * cases) and the regular class case (supertype-table comparison).
 * NOTE(review): the branch structure around array vs. non-array is partly
 * elided in this extract (numbering gaps).
 */
1774 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1777 int rank_reg = alloc_preg (cfg);
1778 int eclass_reg = alloc_preg (cfg);
1780 g_assert (!klass_inst);
/* Array path: the rank must match exactly. */
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1783 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1784 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class comparison; enums cast to their underlying representation. */
1786 if (klass->cast_class == mono_defaults.object_class) {
1787 int parent_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1789 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1790 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1791 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1792 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 } else if (klass->cast_class == mono_defaults.enum_class) {
1795 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1796 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1797 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1799 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1800 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1803 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1804 /* Check that the object is a vector too */
1805 int bounds_reg = alloc_preg (cfg);
1806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1808 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, as in mini_emit_isninst_cast_inst. */
1811 int idepth_reg = alloc_preg (cfg);
1812 int stypes_reg = alloc_preg (cfg);
1813 int stype = alloc_preg (cfg);
1815 mono_class_setup_supertypes (klass);
1817 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1819 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1820 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1824 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with KLASS known at compile time. */
1829 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1831 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR setting SIZE bytes at DESTREG + OFFSET to VAL (only 0 is
 * supported). Uses a single immediate store when SIZE fits a register and
 * the alignment allows it; otherwise stores a value register in descending
 * power-of-two chunks.
 * NOTE(review): loop/size bookkeeping lines are elided in this extract.
 */
1835 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1839 g_assert (val == 0);
/* Fast path: one aligned immediate store covers the whole region. */
1844 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1847 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1855 #if SIZEOF_REGISTER == 8
1857 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register and store chunk by chunk. */
1863 val_reg = alloc_preg (cfg);
1865 if (SIZEOF_REGISTER == 8)
1866 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1868 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1871 /* This could be optimized further if necessary */
1873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1880 #if !NO_UNALIGNED_ACCESS
1881 if (SIZEOF_REGISTER == 8) {
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR copying SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET,
 * using load/store pairs in descending power-of-two chunks subject to ALIGN.
 * NOTE(review): loop/size bookkeeping lines are elided in this extract.
 */
1913 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1920 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1921 g_assert (size < 10000);
1924 /* This could be optimized further if necessary */
/* Unaligned prefix: copy byte-by-byte until the chunked loop can take over. */
1926 cur_reg = alloc_preg (cfg);
1927 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1935 #if !NO_UNALIGNED_ACCESS
1936 if (SIZEOF_REGISTER == 8) {
1938 cur_reg = alloc_preg (cfg);
1939 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1965 cur_reg = alloc_preg (cfg);
1966 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.
 * AOT uses OP_TLS_SET_REG with a patched offset; JIT resolves the offset
 * at compile time via mini_get_tls_offset ().
 */
1975 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1979 if (cfg->compile_aot) {
1980 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1981 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1983 ins->sreg2 = c->dreg;
1984 MONO_ADD_INS (cfg->cbb, ins);
1986 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1988 ins->inst_offset = mini_get_tls_offset (tls_key);
1989 MONO_ADD_INS (cfg->cbb, ins);
1996 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 * Emit IR pushing the method's LMF (Last Managed Frame) onto the LMF stack
 * (see comment above). Chooses between a direct TLS store of the LMF, an
 * inlined lmf_addr computation from jit_tls, an inlined pthread_getspecific
 * path, and a plain mono_get_lmf_addr icall, depending on what the target
 * supports.
 * NOTE(review): several branch conditions are elided in this extract.
 */
1999 emit_push_lmf (MonoCompile *cfg)
2002 * Emit IR to push the LMF:
2003 * lmf_addr = <lmf_addr from tls>
2004 * lmf->lmf_addr = lmf_addr
2005 * lmf->prev_lmf = *lmf_addr
2008 int lmf_reg, prev_lmf_reg;
2009 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS. */
2014 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2015 /* Load current lmf */
2016 lmf_ins = mono_get_lmf_intrinsic (cfg);
2018 MONO_ADD_INS (cfg->cbb, lmf_ins);
2019 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2020 lmf_reg = ins->dreg;
2021 /* Save previous_lmf */
2022 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF */
2024 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2027 * Store lmf_addr in a variable, so it can be allocated to a global register.
2029 if (!cfg->lmf_addr_var)
2030 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Compute lmf_addr from the jit_tls structure when the intrinsic is available. */
2033 ins = mono_get_jit_tls_intrinsic (cfg);
2035 int jit_tls_dreg = ins->dreg;
2037 MONO_ADD_INS (cfg->cbb, ins);
2038 lmf_reg = alloc_preg (cfg);
2039 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2041 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2044 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2046 MONO_ADD_INS (cfg->cbb, lmf_ins);
2049 MonoInst *args [16], *jit_tls_ins, *ins;
2051 /* Inline mono_get_lmf_addr () */
2052 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2054 /* Load mono_jit_tls_id */
2055 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2056 /* call pthread_getspecific () */
2057 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2058 /* lmf_addr = &jit_tls->lmf */
2059 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2062 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2066 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2068 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2069 lmf_reg = ins->dreg;
2071 prev_lmf_reg = alloc_preg (cfg);
2072 /* Save previous_lmf */
2073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2074 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link our LMF as the new top of the LMF stack. */
2076 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2083 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 * Emit IR popping the method's LMF from the LMF stack (inverse of
 * emit_push_lmf): restore previous_lmf either directly into TLS or
 * through the cached lmf_addr.
 */
2086 emit_pop_lmf (MonoCompile *cfg)
2088 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2094 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2095 lmf_reg = ins->dreg;
2097 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2098 /* Load previous_lmf */
2099 prev_lmf_reg = alloc_preg (cfg);
2100 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new LMF */
2102 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2105 * Emit IR to pop the LMF:
2106 * *(lmf->lmf_addr) = lmf->prev_lmf
2108 /* This could be called before emit_push_lmf () */
2109 if (!cfg->lmf_addr_var)
2110 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2111 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2113 prev_lmf_reg = alloc_preg (cfg);
2114 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2115 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as the
 * single argument, when enter/leave profiling is enabled. Skipped for
 * inlined methods to avoid distorting profiles.
 */
2120 emit_instrumentation_call (MonoCompile *cfg, void *func)
2122 MonoInst *iargs [1];
2125 * Avoid instrumenting inlined methods since it can
2126 * distort profiling results.
2128 if (cfg->method != cfg->current_method)
2131 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2132 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2133 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Pick the call opcode variant (plain / _REG for calli / _MEMBASE for
 * virtual) matching the return type TYPE. Enums and generic instances are
 * normalized and re-dispatched; aborts on unknown type codes.
 */
2138 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2141 type = mini_get_underlying_type (cfg, type);
2142 switch (type->type) {
2143 case MONO_TYPE_VOID:
2144 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2155 case MONO_TYPE_FNPTR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2157 case MONO_TYPE_CLASS:
2158 case MONO_TYPE_STRING:
2159 case MONO_TYPE_OBJECT:
2160 case MONO_TYPE_SZARRAY:
2161 case MONO_TYPE_ARRAY:
2162 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2165 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2168 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2170 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2172 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 case MONO_TYPE_VALUETYPE:
/* Enums call like their underlying integral type. */
2174 if (type->data.klass->enumtype) {
2175 type = mono_class_enum_basetype (type->data.klass);
2178 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2179 case MONO_TYPE_TYPEDBYREF:
2180 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2181 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2182 type = &type->data.generic_class->container_class->byval_arg;
2185 case MONO_TYPE_MVAR:
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2189 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2195 * target_type_is_incompatible:
2196 * @cfg: MonoCompile context
2198 * Check that the item @arg on the evaluation stack can be stored
2199 * in the target type (can be a local, or field, etc).
2200 * The cfg arg can be used to check if we need verification or just
2203 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 * Return nonzero if the stack value ARG cannot be stored into a location of
 * type TARGET (local, field, etc.). See the comment block above for details.
 * NOTE(review): several return statements are elided in this extract.
 */
2206 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2208 MonoType *simple_type;
2211 if (target->byref) {
2212 /* FIXME: check that the pointed to types match */
2213 if (arg->type == STACK_MP)
2214 return arg->klass != mono_class_from_mono_type (target);
2215 if (arg->type == STACK_PTR)
2220 simple_type = mini_get_underlying_type (cfg, target);
2221 switch (simple_type->type) {
2222 case MONO_TYPE_VOID:
2230 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2234 /* STACK_MP is needed when setting pinned locals */
2235 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2240 case MONO_TYPE_FNPTR:
2242 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2243 * in native int. (#688008).
2245 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2248 case MONO_TYPE_CLASS:
2249 case MONO_TYPE_STRING:
2250 case MONO_TYPE_OBJECT:
2251 case MONO_TYPE_SZARRAY:
2252 case MONO_TYPE_ARRAY:
2253 if (arg->type != STACK_OBJ)
2255 /* FIXME: check type compatibility */
2259 if (arg->type != STACK_I8)
2263 if (arg->type != cfg->r4_stack_type)
2267 if (arg->type != STACK_R8)
/* Value types additionally require the exact class to match. */
2270 case MONO_TYPE_VALUETYPE:
2271 if (arg->type != STACK_VTYPE)
2273 klass = mono_class_from_mono_type (simple_type);
2274 if (klass != arg->klass)
2277 case MONO_TYPE_TYPEDBYREF:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_GENERICINST:
2285 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 if (klass != arg->klass)
2293 if (arg->type != STACK_OBJ)
2295 /* FIXME: check type compatibility */
/* Shared generic type variables: vtype or reference depending on the constraint. */
2299 case MONO_TYPE_MVAR:
2300 g_assert (cfg->generic_sharing_context);
2301 if (mini_type_var_is_vt (cfg, simple_type)) {
2302 if (arg->type != STACK_VTYPE)
2305 if (arg->type != STACK_OBJ)
2310 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2316 * Prepare arguments for passing to a function call.
2317 * Return a non-zero value if the arguments can't be passed to the given
2319 * The type checks are not yet complete and some conversions may need
2320 * casts on 32 or 64 bit architectures.
2322 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 * Return nonzero if the stack values in ARGS cannot be passed to a call
 * with signature SIG (see the comment block above; not a full type check).
 * NOTE(review): several return statements are elided in this extract.
 */
2325 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2327 MonoType *simple_type;
/* args [0] is the 'this' argument for instance calls. */
2331 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2335 for (i = 0; i < sig->param_count; ++i) {
2336 if (sig->params [i]->byref) {
2337 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2341 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2343 switch (simple_type->type) {
2344 case MONO_TYPE_VOID:
2353 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2359 case MONO_TYPE_FNPTR:
2360 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2363 case MONO_TYPE_CLASS:
2364 case MONO_TYPE_STRING:
2365 case MONO_TYPE_OBJECT:
2366 case MONO_TYPE_SZARRAY:
2367 case MONO_TYPE_ARRAY:
2368 if (args [i]->type != STACK_OBJ)
2373 if (args [i]->type != STACK_I8)
2377 if (args [i]->type != cfg->r4_stack_type)
2381 if (args [i]->type != STACK_R8)
2384 case MONO_TYPE_VALUETYPE:
/* Enums are re-dispatched as their underlying integral type. */
2385 if (simple_type->data.klass->enumtype) {
2386 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2389 if (args [i]->type != STACK_VTYPE)
2392 case MONO_TYPE_TYPEDBYREF:
2393 if (args [i]->type != STACK_VTYPE)
2396 case MONO_TYPE_GENERICINST:
2397 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2400 case MONO_TYPE_MVAR:
2402 if (args [i]->type != STACK_VTYPE)
2406 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): the returned opcodes are elided in this extract; only the
 * case labels are visible.
 */
2414 callvirt_to_call (int opcode)
2417 case OP_CALL_MEMBASE:
2419 case OP_VOIDCALL_MEMBASE:
2421 case OP_FCALL_MEMBASE:
2423 case OP_RCALL_MEMBASE:
2425 case OP_VCALL_MEMBASE:
2427 case OP_LCALL_MEMBASE:
2430 g_assert_not_reached ();
2436 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emit IR materializing the IMT argument (either IMT_ARG or a constant for
 * METHOD) into a register and attach it to CALL. LLVM records the register
 * in call->imt_arg_reg; otherwise it is bound to MONO_ARCH_IMT_REG when the
 * architecture defines one, falling back to mono_arch_emit_imt_argument ().
 */
2438 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2442 if (COMPILE_LLVM (cfg)) {
2443 method_reg = alloc_preg (cfg);
2446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2447 } else if (cfg->compile_aot) {
2448 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2451 MONO_INST_NEW (cfg, ins, OP_PCONST);
2452 ins->inst_p0 = method;
2453 ins->dreg = method_reg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2458 call->imt_arg_reg = method_reg;
2460 #ifdef MONO_ARCH_IMT_REG
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2463 /* Need this to keep the IMT arg alive */
2464 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same materialization, bound to the architecture's IMT register. */
2469 #ifdef MONO_ARCH_IMT_REG
2470 method_reg = alloc_preg (cfg);
2473 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2474 } else if (cfg->compile_aot) {
2475 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2478 MONO_INST_NEW (cfg, ins, OP_PCONST);
2479 ins->inst_p0 = method;
2480 ins->dreg = method_reg;
2481 MONO_ADD_INS (cfg->cbb, ins);
2484 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2486 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP and fill in its target; presumably the
 *   elided lines also store IP and TYPE — TODO confirm against full source.
 */
2490 static MonoJumpInfo *
2491 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2493 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2497 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Wrapper around mono_class_check_context_used that only queries when the
 *   compile is using generic sharing; the non-sharing return is elided here.
 */
2503 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2505 if (cfg->generic_sharing_context)
2506 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Method-level counterpart of mini_class_check_context_used; queries the
 *   generic context usage only when generic sharing is active.
 */
2512 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2514 if (cfg->generic_sharing_context)
2515 return mono_method_check_context_used (method);
2521 * check_method_sharing:
2523 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (both optional): *out_pass_vtable / *out_pass_mrgctx.
 * A static or valuetype method on a generic class may need its class vtable
 * passed; a method with its own method_inst needs an mrgctx instead — the
 * two are mutually exclusive (see the g_assert below).
 * NOTE(review): chunk is elided — closing braces and some assignments to
 * pass_vtable/pass_mrgctx are not visible here.
 */
2526 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2528 gboolean pass_vtable = FALSE;
2529 gboolean pass_mrgctx = FALSE;
2531 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2532 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2533 gboolean sharable = FALSE;
2535 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2538 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2539 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2540 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2542 sharable = sharing_enabled && context_sharable;
2546 * Pass vtable iff target method might
2547 * be shared, which means that sharing
2548 * is enabled for its class and its
2549 * context is sharable (and it's not a
2552 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) take the mrgctx path instead. */
2556 if (mini_method_get_context (cmethod) &&
2557 mini_method_get_context (cmethod)->method_inst) {
2558 g_assert (!pass_vtable);
2560 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2563 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2564 MonoGenericContext *context = mini_method_get_context (cmethod);
2565 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2567 if (sharing_enabled && context_sharable)
2569 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2574 if (out_pass_vtable)
2575 *out_pass_vtable = pass_vtable;
2576 if (out_pass_mrgctx)
2577 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build (but do not add to a basic block) a MonoCallInst for SIG/ARGS.
 *   CALLI/VIRTUAL/TAIL select the call flavor; RGCTX and UNBOX_TRAMPOLINE
 *   are recorded on the call.  Handles valuetype returns (via vret_var or
 *   OP_OUTARG_VTRETADDR) and the soft-float r8->r4 argument conversion.
 * NOTE(review): chunk is elided — declarations of call/sig_ret/loada and
 * several branches are not visible here.
 */
2580 inline static MonoCallInst *
2581 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2582 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2586 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls: emit the leave-instrumentation probe before the OP_TAILCALL. */
2591 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2593 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2595 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2598 call->signature = sig;
2599 call->rgctx_reg = rgctx;
2600 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2602 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or allocate a temp and
 * reference it through OP_OUTARG_VTRETADDR (see comment below). */
2605 if (mini_type_is_vtype (cfg, sig_ret)) {
2606 call->vret_var = cfg->vret_addr;
2607 //g_assert_not_reached ();
2609 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2610 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2613 temp->backend.is_pinvoke = sig->pinvoke;
2616 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2617 * address of return value to increase optimization opportunities.
2618 * Before vtype decomposition, the dreg of the call ins itself represents the
2619 * fact the call modifies the return value. After decomposition, the call will
2620 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2621 * will be transformed into an LDADDR.
2623 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2624 loada->dreg = alloc_preg (cfg);
2625 loada->inst_p0 = temp;
2626 /* We reference the call too since call->dreg could change during optimization */
2627 loada->inst_p1 = call;
2628 MONO_ADD_INS (cfg->cbb, loada);
2630 call->inst.dreg = temp->dreg;
2632 call->vret_var = loada;
2633 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2634 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2636 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2637 if (COMPILE_SOFT_FLOAT (cfg)) {
2639 * If the call has a float argument, we would need to do an r8->r4 conversion using
2640 * an icall, but that cannot be done during the call sequence since it would clobber
2641 * the call registers + the stack. So we do it before emitting the call.
2643 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2645 MonoInst *in = call->args [i];
2647 if (i >= sig->hasthis)
2648 t = sig->params [i - sig->hasthis];
2650 t = &mono_defaults.int_class->byval_arg;
2651 t = mono_type_get_underlying_type (t);
2653 if (!t->byref && t->type == MONO_TYPE_R4) {
2654 MonoInst *iargs [1];
2658 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2660 /* The result will be in an int vreg */
2661 call->args [i] = conv;
2667 call->need_unbox_trampoline = unbox_trampoline;
/* Hand off to the LLVM or native backend to lower the out-arguments. */
2670 if (COMPILE_LLVM (cfg))
2671 mono_llvm_emit_call (cfg, call);
2673 mono_arch_emit_call (cfg, call);
2675 mono_arch_emit_call (cfg, call);
2678 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2679 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument to CALL: on architectures with a dedicated
 *   RGCTX register, pin RGCTX_REG to it and flag the cfg; otherwise record
 *   the vreg in call->rgctx_arg_reg.
 */
2685 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2687 #ifdef MONO_ARCH_RGCTX_REG
2688 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2689 cfg->uses_rgctx_reg = TRUE;
2690 call->rgctx_reg = TRUE;
2692 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with optional IMT and rgctx
 *   arguments.  For pinvoke wrappers with callconv checking enabled, saves
 *   the stack pointer before the call and throws ExecutionEngineException
 *   if it differs afterwards (calling-convention mismatch detection).
 * NOTE(review): chunk is elided — local declarations and some closing
 * braces are not visible here.
 */
2699 inline static MonoInst*
2700 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2705 gboolean check_sp = FALSE;
2707 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2708 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2710 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2715 rgctx_reg = mono_alloc_preg (cfg);
2716 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Capture the pre-call SP into a dedicated variable for the later check. */
2720 if (!cfg->stack_inbalance_var)
2721 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2723 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2724 ins->dreg = cfg->stack_inbalance_var->dreg;
2725 MONO_ADD_INS (cfg->cbb, ins);
2728 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2730 call->inst.sreg1 = addr->dreg;
2733 emit_imt_argument (cfg, call, NULL, imt_arg);
2735 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call: re-read SP, restore the saved value, then compare and throw. */
2740 sp_reg = mono_alloc_preg (cfg);
2742 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2744 MONO_ADD_INS (cfg->cbb, ins);
2746 /* Restore the stack so we don't crash when throwing the exception */
2747 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2748 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2749 MONO_ADD_INS (cfg->cbb, ins);
2751 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2752 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2756 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2758 return (MonoInst*)call;
2762 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2765 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2767 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual / tail / remoted) call to METHOD.  Visible
 *   responsibilities: rewrite string ctor signatures, route possibly-remote
 *   calls through a remoting wrapper, devirtualize non-virtual and
 *   final/sealed targets, dispatch delegates through invoke_impl, and
 *   otherwise emit a vtable- or IMT-slot-based virtual call.
 * NOTE(review): chunk is elided — local declarations and several closing
 * braces are missing from this view.
 */
2770 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2771 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2773 #ifndef DISABLE_REMOTING
2774 gboolean might_be_remote = FALSE;
2776 gboolean virtual = this != NULL;
2777 gboolean enable_for_aot = TRUE;
2781 gboolean need_unbox_trampoline;
2784 sig = mono_method_signature (method);
2787 rgctx_reg = mono_alloc_preg (cfg);
2788 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2791 if (method->string_ctor) {
2792 /* Create the real signature */
2793 /* FIXME: Cache these */
2794 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2795 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2800 context_used = mini_method_check_context_used (cfg, method);
2802 #ifndef DISABLE_REMOTING
/* Remoting: a MarshalByRef (or object-class) non-virtual instance call on a
 * possibly-transparent-proxy receiver goes through an rgctx-resolved
 * remoting-invoke-with-check stub instead of a direct call. */
2803 might_be_remote = this && sig->hasthis &&
2804 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2805 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2807 if (might_be_remote && context_used) {
2810 g_assert (cfg->generic_sharing_context);
2812 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2814 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2818 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2820 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2822 #ifndef DISABLE_REMOTING
2823 if (might_be_remote)
2824 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2827 call->method = method;
2828 call->inst.flags |= MONO_INST_HAS_METHOD;
2829 call->inst.inst_left = this;
2830 call->tail_call = tail;
2833 int vtable_reg, slot_reg, this_reg;
2836 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: call through delegate->invoke_impl. */
2838 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2839 MonoInst *dummy_use;
2841 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2843 /* Make a call to delegate->invoke_impl */
2844 call->inst.inst_basereg = this_reg;
2845 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2846 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2848 /* We must emit a dummy use here because the delegate trampoline will
2849 replace the 'this' argument with the delegate target making this activation
2850 no longer a root for the delegate.
2851 This is an issue for delegates that target collectible code such as dynamic
2852 methods of GC'able assemblies.
2854 For a test case look into #667921.
2856 FIXME: a dummy use is not the best way to do it as the local register allocator
2857 will put it on a caller save register and spil it around the call.
2858 Ideally, we would either put it on a callee save register or only do the store part.
2860 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2862 return (MonoInst*)call;
/* Devirtualization: non-virtual methods, and final virtual methods, can be
 * dispatched directly after a null check on 'this'. */
2865 if ((!cfg->compile_aot || enable_for_aot) &&
2866 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2867 (MONO_METHOD_IS_FINAL (method) &&
2868 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2869 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2871 * the method is not virtual, we just need to ensure this is not null
2872 * and then we can call the method directly.
2874 #ifndef DISABLE_REMOTING
2875 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2877 * The check above ensures method is not gshared, this is needed since
2878 * gshared methods can't have wrappers.
2880 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2884 if (!method->string_ctor)
2885 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2887 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2888 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2890 * the method is virtual, but we can statically dispatch since either
2891 * it's class or the method itself are sealed.
2892 * But first we need to ensure it's not a null reference.
2894 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2896 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable, then select an IMT slot
 * (interfaces / generic virtual methods) or a plain vtable slot. */
2898 vtable_reg = alloc_preg (cfg);
2899 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2900 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2901 guint32 imt_slot = mono_method_get_imt_slot (method);
2902 emit_imt_argument (cfg, call, call->method, imt_arg);
2903 slot_reg = vtable_reg;
2904 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2906 slot_reg = vtable_reg;
2907 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2908 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2910 g_assert (mono_method_signature (method)->generic_param_count);
2911 emit_imt_argument (cfg, call, call->method, imt_arg);
2915 call->inst.sreg1 = slot_reg;
2916 call->inst.inst_offset = offset;
2917 call->virtual = TRUE;
2921 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2924 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2926 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call with METHOD's own signature and no
 *   IMT/rgctx arguments.
 */
2930 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2932 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct (non-calli, non-virtual) call to native code FUNC with
 *   signature SIG.  NOTE(review): the parameter list continues on an elided
 *   line, and the assignment of FUNC to the call is not visible here.
 */
2936 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2943 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2946 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2948 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and emit a native call to its
 *   wrapper with the icall's signature.
 */
2952 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2954 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2958 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2962 * mono_emit_abs_call:
2964 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * Creates a MonoJumpInfo, registers it in cfg->abs_patches (keyed by the ji
 * pointer itself), and passes the ji as the "address" of a native call; the
 * PATCH_INFO_ABS resolver recognizes it via fptr_is_patch.
 */
2966 inline static MonoInst*
2967 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2968 MonoMethodSignature *sig, MonoInst **args)
2970 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2974 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2977 if (cfg->abs_patches == NULL)
2978 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2979 g_hash_table_insert (cfg->abs_patches, ji, ji);
2980 ins = mono_emit_native_call (cfg, ji, sig, args);
2981 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without a wrapper).  Disabled
 *   under LLVM (see comment), when sdb sequence points are generated, or
 *   when direct icalls are explicitly disabled on the cfg.
 */
2986 direct_icalls_enabled (MonoCompile *cfg)
2988 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2990 if (cfg->compile_llvm)
2993 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot
 *   raise and direct icalls are enabled, inline the (lazily created) icall
 *   wrapper instead of calling through it; otherwise fall back to a normal
 *   native call to the wrapper.  OUT_CBB receives the basic block the
 *   inliner ends in.
 */
2999 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3002 * Call the jit icall without a wrapper if possible.
3003 * The wrapper is needed for the following reasons:
3004 * - to handle exceptions thrown using mono_raise_exceptions () from the
3005 * icall function. The EH code needs the lmf frame pushed by the
3006 * wrapper to be able to unwind back to managed code.
3007 * - to be able to do stack walks for asynchronously suspended
3008 * threads when debugging.
3010 if (info->no_raise && direct_icalls_enabled (cfg)) {
3014 if (!info->wrapper_method) {
3015 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3016 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
3018 mono_memory_barrier ();
3022 * Inline the wrapper method, which is basically a call to the C icall, and
3023 * an exception check.
3025 costs = inline_method (cfg, info->wrapper_method, NULL,
3026 args, NULL, cfg->real_offset, TRUE, out_cbb);
3027 g_assert (costs > 0);
3028 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3032 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend a small-integer call result.  Native (pinvoke) code —
 *   and LLVM-compiled code — may leave the upper bits of sub-register-sized
 *   return values uninitialized, so emit the matching ICONV widening op.
 */
3037 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3039 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3040 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3044 * Native code might return non register sized integers
3045 * without initializing the upper bits.
3047 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3048 case OP_LOADI1_MEMBASE:
3049 widen_op = OP_ICONV_TO_I1;
3051 case OP_LOADU1_MEMBASE:
3052 widen_op = OP_ICONV_TO_U1;
3054 case OP_LOADI2_MEMBASE:
3055 widen_op = OP_ICONV_TO_I2;
3057 case OP_LOADU2_MEMBASE:
3058 widen_op = OP_ICONV_TO_U2;
3064 if (widen_op != -1) {
3065 int dreg = alloc_preg (cfg);
3068 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3069 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the managed String.memcpy(dest, src, size)
 *   helper from corlib; aborts if the corlib is too old to have it.
 */
3079 get_memcpy_method (void)
3081 static MonoMethod *memcpy_method = NULL;
3082 if (!memcpy_method) {
3083 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3085 g_error ("Old corlib found. Install a new one");
3087 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively set one bit in *WB_BITMAP per pointer-sized slot (relative
 *   to OFFSET) that holds a managed reference inside KLASS.  Used to decide
 *   which stores of a value copy need write barriers.  Static fields are
 *   skipped; nested valuetypes with references recurse with their field
 *   offset added.
 */
3091 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3093 MonoClassField *field;
3094 gpointer iter = NULL;
3096 while ((field = mono_class_get_fields (klass, &iter))) {
3099 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
3101 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3102 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3103 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3104 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3106 MonoClass *field_class = mono_class_from_mono_type (field->type);
3107 if (field_class->has_references)
3108 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Fast paths, in
 *   order of preference: the architecture's OP_CARD_TABLE_WBARRIER; an
 *   inline card-table mark (shift, optional mask, store 1 into the card);
 *   otherwise a call to the runtime's write barrier method.  No-op when
 *   cfg->gen_write_barriers is off.
 * NOTE(review): chunk is elided — some declarations and braces are missing.
 */
3114 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3116 int card_table_shift_bits;
3117 gpointer card_table_mask;
3119 MonoInst *dummy_use;
3120 int nursery_shift_bits;
3121 size_t nursery_size;
3122 gboolean has_card_table_wb = FALSE;
3124 if (!cfg->gen_write_barriers)
3127 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3129 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3131 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3132 has_card_table_wb = TRUE;
3135 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3138 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3139 wbarrier->sreg1 = ptr->dreg;
3140 wbarrier->sreg2 = value->dreg;
3141 MONO_ADD_INS (cfg->cbb, wbarrier);
3142 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3143 int offset_reg = alloc_preg (cfg);
3144 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked. */
3147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3148 if (card_table_mask)
3149 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3151 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3152 * IMM's larger than 32bits.
3154 if (cfg->compile_aot) {
3155 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3157 MONO_INST_NEW (cfg, ins, OP_PCONST);
3158 ins->inst_p0 = card_table;
3159 ins->dreg = card_reg;
3160 MONO_ADD_INS (cfg->cbb, ins);
3163 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3164 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3166 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3167 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
3170 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Intrinsic valuetype copy that honours GC write barriers.  Small copies
 *   (<= 5 pointer words) are unrolled: each pointer-sized word is loaded
 *   and stored, with a write barrier on slots the bitmap marks as
 *   references; larger copies call mono_gc_wbarrier_value_copy_bitmap.
 *   Bails out (returns FALSE, presumably — the returns are elided) when
 *   alignment is below pointer size or the bitmap cannot cover the size.
 *   iargs[0]=dest, iargs[1]=src as set up by the caller.
 */
3174 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3176 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3177 unsigned need_wb = 0;
3182 /*types with references can't have alignment smaller than sizeof(void*) */
3183 if (align < SIZEOF_VOID_P)
3186 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3187 if (size > 32 * SIZEOF_VOID_P)
3190 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3192 /* We don't unroll more than 5 stores to avoid code bloat. */
3193 if (size > 5 * SIZEOF_VOID_P) {
3194 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3195 size += (SIZEOF_VOID_P - 1);
3196 size &= ~(SIZEOF_VOID_P - 1);
3198 EMIT_NEW_ICONST (cfg, iargs [2], size);
3199 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3200 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3204 destreg = iargs [0]->dreg;
3205 srcreg = iargs [1]->dreg;
3208 dest_ptr_reg = alloc_preg (cfg);
3209 tmp_reg = alloc_preg (cfg);
/* Walk the struct pointer-word by pointer-word, advancing dest_ptr_reg. */
3212 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3214 while (size >= SIZEOF_VOID_P) {
3215 MonoInst *load_inst;
3216 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3217 load_inst->dreg = tmp_reg;
3218 load_inst->inst_basereg = srcreg;
3219 load_inst->inst_offset = offset;
3220 MONO_ADD_INS (cfg->cbb, load_inst);
3222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3225 emit_write_barrier (cfg, iargs [0], load_inst);
3227 offset += SIZEOF_VOID_P;
3228 size -= SIZEOF_VOID_P;
3231 /*tmp += sizeof (void*)*/
3232 if (size >= SIZEOF_VOID_P) {
3233 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3234 MONO_ADD_INS (cfg->cbb, iargs [0]);
3238 /* Those cannot be references since size < sizeof (void*) */
3240 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3241 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3247 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3248 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3254 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3255 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3264 * Emit code to copy a valuetype of type @klass whose address is stored in
3265 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy visible here: under gsharedvt, sizes/memcpy come from rgctx info
 * slots and the copy goes through a calli; when write barriers are needed
 * (class has references, not a stack store, not native layout) either the
 * wb-aware intrinsic or mono_value_copy is used; otherwise a small copy is
 * inlined via mini_emit_memcpy and larger ones call the managed memcpy.
 * NOTE(review): chunk is elided — several branches/braces are missing.
 */
3268 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3270 MonoInst *iargs [4];
3273 MonoMethod *memcpy_method;
3274 MonoInst *size_ins = NULL;
3275 MonoInst *memcpy_ins = NULL;
3278 if (cfg->generic_sharing_context)
3279 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3282 * This check breaks with spilled vars... need to handle it during verification anyway.
3283 * g_assert (klass && klass == src->klass && klass == dest->klass);
3286 if (mini_is_gsharedvt_klass (cfg, klass)) {
3288 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3289 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3293 n = mono_class_native_size (klass, &align);
3295 n = mono_class_value_size (klass, &align);
3297 /* if native is true there should be no references in the struct */
3298 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3299 /* Avoid barriers when storing to the stack */
3300 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3301 (dest->opcode == OP_LDADDR))) {
3307 context_used = mini_class_check_context_used (cfg, klass);
3309 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3310 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3312 } else if (context_used) {
3313 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3315 if (cfg->compile_aot) {
3316 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3318 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3319 mono_class_compute_gc_descriptor (klass);
3324 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3326 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline small copies, call memcpy for the rest. */
3331 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3332 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3333 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3338 iargs [2] = size_ins;
3340 EMIT_NEW_ICONST (cfg, iargs [2], n);
3342 memcpy_method = get_memcpy_method ();
3344 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3346 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the managed String.memset(dest, val, size)
 *   helper from corlib; aborts if the corlib is too old to have it.
 */
3351 get_memset_method (void)
3353 static MonoMethod *memset_method = NULL;
3354 if (!memset_method) {
3355 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3357 g_error ("Old corlib found. Install a new one");
3359 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at DEST.
 *   gsharedvt classes call a runtime bzero helper through a calli with the
 *   size fetched from the rgctx; small known-size types are zeroed inline
 *   with mini_emit_memset; larger ones call the managed memset with val=0.
 * NOTE(review): chunk is elided — some braces/returns are missing here.
 */
3363 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3365 MonoInst *iargs [3];
3368 MonoMethod *memset_method;
3369 MonoInst *size_ins = NULL;
3370 MonoInst *bzero_ins = NULL;
3371 static MonoMethod *bzero_method;
3373 /* FIXME: Optimize this for the case when dest is an LDADDR */
3374 mono_class_init (klass);
3375 if (mini_is_gsharedvt_klass (cfg, klass)) {
3376 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3377 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3379 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3380 g_assert (bzero_method);
3382 iargs [1] = size_ins;
3383 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3387 n = mono_class_value_size (klass, &align);
3389 if (n <= sizeof (gpointer) * 8) {
3390 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3393 memset_method = get_memset_method ();
3395 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3396 EMIT_NEW_ICONST (cfg, iargs [2], n);
3397 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that produces the runtime generic context for METHOD:
 *   - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD) load the mrgctx
 *     from the vtable variable;
 *   - static/valuetype methods load the vtable variable (dereferencing the
 *     mrgctx's class_vtable field when the method also has a method_inst);
 *   - otherwise the vtable is loaded from 'this'.
 *   Requires generic sharing to be active.
 * NOTE(review): chunk is elided — some returns/braces are missing here.
 */
3402 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3404 MonoInst *this = NULL;
3406 g_assert (cfg->generic_sharing_context);
3408 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3409 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3410 !method->klass->valuetype)
3411 EMIT_NEW_ARGLOAD (cfg, this, 0);
3413 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3414 MonoInst *mrgctx_loc, *mrgctx_var;
3417 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3419 mrgctx_loc = mono_get_vtable_var (cfg);
3420 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3423 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3424 MonoInst *vtable_loc, *vtable_var;
3428 vtable_loc = mono_get_vtable_var (cfg);
3429 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3431 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3432 MonoInst *mrgctx_var = vtable_var;
3435 vtable_reg = alloc_preg (cfg);
3436 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3437 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: vtable comes from 'this'. */
3445 vtable_reg = alloc_preg (cfg);
3446 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry patch descriptor from MP: records the owning
 *   method, whether the lookup goes through an mrgctx, the embedded patch
 *   (PATCH_TYPE/PATCH_DATA identifying the subject) and the INFO_TYPE of
 *   the slot to fetch.
 */
3451 static MonoJumpInfoRgctxEntry *
3452 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3454 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3455 res->method = method;
3456 res->in_mrgctx = in_mrgctx;
3457 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3458 res->data->type = patch_type;
3459 res->data->data.target = patch_data;
3460 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline, passing RGCTX and the
 *   entry descriptor via an ABS patch.
 */
3465 static inline MonoInst*
3466 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3468 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that loads the RGCTX_TYPE slot for KLASS from the current
 *   method's runtime generic context.
 */
3472 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3473 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3475 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3476 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3478 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR that loads the RGCTX_TYPE slot for signature SIG from the
 *   current method's runtime generic context.
 */
3482 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3483 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3485 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3486 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3488 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR that loads the RGCTX_TYPE slot for the (SIG, CMETHOD) gsharedvt
 *   call pair from the current method's runtime generic context.
 */
3492 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3493 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3495 MonoJumpInfoGSharedVtCall *call_info;
3496 MonoJumpInfoRgctxEntry *entry;
3499 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3500 call_info->sig = sig;
3501 call_info->method = cmethod;
3503 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3504 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3506 return emit_rgctx_fetch (cfg, rgctx, entry);
3510 * emit_get_rgctx_virt_method:
3512 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/* Builds a MonoJumpInfoVirtMethod descriptor and fetches the corresponding
 * rgctx slot via the lazy fetch trampoline. */
3515 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3516 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3518 MonoJumpInfoVirtMethod *info;
3519 MonoJumpInfoRgctxEntry *entry;
3522 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3523 info->klass = klass;
3524 info->method = virt_method;
3526 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3527 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3529 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR that loads the METHOD_GSHAREDVT_INFO slot for CMETHOD (described
 *   by INFO) from the current method's runtime generic context.
 */
3533 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3534 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3536 MonoJumpInfoRgctxEntry *entry;
3539 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3540 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3542 return emit_rgctx_fetch (cfg, rgctx, entry);
3546 * emit_get_rgctx_method:
3548 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3549 * normal constants, else emit a load from the rgctx.
3552 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3553 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method (or its rgctx) is a compile-time constant. */
3555 if (!context_used) {
3558 switch (rgctx_type) {
3559 case MONO_RGCTX_INFO_METHOD:
3560 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3562 case MONO_RGCTX_INFO_METHOD_RGCTX:
3563 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3566 g_assert_not_reached ();
/* Shared case: fetch the slot from the runtime generic context. */
3569 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3570 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3572 return emit_rgctx_fetch (cfg, rgctx, entry);
3577 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3578 MonoClassField *field, MonoRgctxInfoType rgctx_type)
/* Emit IR to load the property RGCTX_TYPE of FIELD from the runtime generic
 * context (always goes through an rgctx fetch; no constant fast path here). */
3580 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3581 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3583 return emit_rgctx_fetch (cfg, rgctx, entry);
3587 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Return the index of the entry (rgctx_type, data) in the per-method
 * gsharedvt info template, adding it if not already present. */
3589 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3590 MonoRuntimeGenericContextInfoTemplate *template;
/* Linear search for an existing matching slot; LOCAL_OFFSET entries are
 * never shared (each one gets its own slot). */
3595 for (i = 0; i < info->num_entries; ++i) {
3596 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3598 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16) when it is full.
 * Old storage stays in the mempool; only the copy is kept live. */
3602 if (info->num_entries == info->count_entries) {
3603 MonoRuntimeGenericContextInfoTemplate *new_entries;
3604 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3606 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3608 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3609 info->entries = new_entries;
3610 info->count_entries = new_count_entries;
/* Append the new template entry and return its index (via idx). */
3613 idx = info->num_entries;
3614 template = &info->entries [idx];
3615 template->info_type = rgctx_type;
3616 template->data = data;
3618 info->num_entries ++;
3624 * emit_get_gsharedvt_info:
3626 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3629 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot in the gsharedvt info template for (data,
 * rgctx_type), then emit a pointer-sized load of that slot from the
 * method's gsharedvt info variable (see header comment above). */
3634 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3635 /* Load info->entries [idx] */
3636 dreg = alloc_preg (cfg);
3637 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3643 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
/* Convenience wrapper: fetch gsharedvt info keyed on the class's byval type. */
3645 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3649 * On return the caller must check @klass for load errors.
3652 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
/* Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either via rgctx (shared code) or as a constant.
 * NOTE(review): the if/else lines around the two branches below are elided
 * in this extraction. */
3654 MonoInst *vtable_arg;
3658 context_used = mini_class_check_context_used (cfg, klass);
/* Shared-code path: load the vtable from the rgctx. */
3661 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3662 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared path: the vtable can be embedded as a constant. */
3664 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3668 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a signature variant of the class-init trampoline helper. */
3671 if (COMPILE_LLVM (cfg))
3672 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3674 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
/* On targets with a dedicated vtable register, pass the vtable in it. */
3675 #ifdef MONO_ARCH_VTABLE_REG
3676 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3677 cfg->uses_vtable_reg = TRUE;
3684 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
/* Emit a sequence point at native offset IP (relative to the IL header) for
 * debugger support — only when seq points are enabled and METHOD is the
 * method actually being compiled (not an inlined callee). */
3688 if (cfg->gen_seq_points && cfg->method == method) {
3689 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Mark seq points taken with values on the IL stack. */
3691 ins->flags |= MONO_INST_NONEMPTY_STACK;
3692 MONO_ADD_INS (cfg->cbb, ins);
3697 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
/* With --debug=casts, record the (from, to) classes of a downcast in the JIT
 * TLS so a failing cast can produce a detailed message. No-op otherwise.
 * NOTE(review): some lines (e.g. the null_check guard around the branch to
 * is_null_bb) are elided in this extraction. */
3699 if (mini_get_debug_options ()->better_cast_details) {
3700 int vtable_reg = alloc_preg (cfg);
3701 int klass_reg = alloc_preg (cfg);
3702 MonoBasicBlock *is_null_bb = NULL;
3704 int to_klass_reg, context_used;
/* Skip the bookkeeping for null objects. */
3707 NEW_BBLOCK (cfg, is_null_bb);
3709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* The cast-details slots live in MonoJitTlsData, reached via a TLS intrinsic. */
3713 tls_get = mono_get_jit_tls_intrinsic (cfg);
3715 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3719 MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = object's vtable->klass */
3720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* to-class: from rgctx in shared code, otherwise as a class constant. */
3725 context_used = mini_class_check_context_used (cfg, klass);
3727 MonoInst *class_ins;
3729 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3730 to_klass_reg = class_ins->dreg;
3732 to_klass_reg = alloc_preg (cfg);
3733 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3738 MONO_START_BB (cfg, is_null_bb);
/* Report the current bblock back to the caller (it may have changed). */
3740 *out_bblock = cfg->cbb;
3746 reset_cast_details (MonoCompile *cfg)
/* Counterpart of save_cast_details: clear the recorded cast info in JIT TLS
 * once the cast has succeeded. No-op unless --debug=casts is on. */
3748 /* Reset the variables holding the cast details */
3749 if (mini_get_debug_options ()->better_cast_details) {
3750 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3752 MONO_ADD_INS (cfg->cbb, tls_get);
3753 /* It is enough to reset the from field */
3754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3759 * On return the caller must check @array_class for load errors
3762 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
/* Emit an exact type check of OBJ against ARRAY_CLASS (used for stelem-style
 * array covariance checks); throws ArrayTypeMismatchException on mismatch.
 * See header comment above re: load errors. NOTE(review): some lines (else
 * branches, early returns) are elided in this extraction. */
3764 int vtable_reg = alloc_preg (cfg);
3767 context_used = mini_class_check_context_used (cfg, array_class);
3769 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on OBJ. */
3771 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare klass pointers (vtables are per-domain). */
3773 if (cfg->opt & MONO_OPT_SHARED) {
3774 int class_reg = alloc_preg (cfg);
3775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3776 if (cfg->compile_aot) {
3777 int klass_reg = alloc_preg (cfg);
3778 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3779 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3781 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: fetch the expected vtable from the rgctx. */
3783 } else if (context_used) {
3784 MonoInst *vtable_ins;
3786 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3787 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Plain code: compare against the concrete vtable (AOT vs JIT constant). */
3789 if (cfg->compile_aot) {
3793 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3795 vt_reg = alloc_preg (cfg);
3796 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3797 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3800 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3802 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3806 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3808 reset_cast_details (cfg);
3812 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3813 * generic code is generated.
3816 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unbox a Nullable<T> VAL by calling the generated Nullable<T>.Unbox method.
 * Shared generic code calls it indirectly through an rgctx-provided address;
 * otherwise a direct call is emitted (see header comment above).
 * NOTE(review): the if/else lines separating the two paths are elided. */
3818 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3821 MonoInst *rgctx, *addr;
3823 /* FIXME: What if the class is shared? We might not
3824 have to get the address of the method from the
3826 addr = emit_get_rgctx_method (cfg, context_used, method,
3827 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3829 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3831 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: may need to pass the vtable as an extra argument. */
3833 gboolean pass_vtable, pass_mrgctx;
3834 MonoInst *rgctx_arg = NULL;
3836 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3837 g_assert (!pass_mrgctx);
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3843 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3846 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3851 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
/* Emit the unbox operation: type-check the boxed object on the stack against
 * KLASS, then produce a managed pointer to its payload (object + header size).
 * NOTE(review): some lines (branch structure around the shared/non-shared
 * checks) are elided in this extraction. */
3855 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3856 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3857 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3858 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3860 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3861 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3862 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3864 /* FIXME: generics */
3865 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type. */
3868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3869 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic path: compare element classes via an rgctx-loaded class. */
3875 MonoInst *element_class;
3877 /* This assertion is from the unboxcast insn */
3878 g_assert (klass->rank == 0);
3880 element_class = emit_get_rgctx_klass (cfg, context_used,
3881 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3883 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3884 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast-details bookkeeping. */
3886 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3887 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3888 reset_cast_details (cfg);
/* Result: pointer past the MonoObject header, i.e. to the unboxed value. */
3891 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3892 MONO_ADD_INS (cfg->cbb, add);
3893 add->type = STACK_MP;
3900 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
/* Unbox for gsharedvt code, where KLASS may instantiate to a value type, a
 * reference type, or a Nullable<T> only known at run time: branch on the
 * CLASS_BOX_TYPE info (1 = ref, 2 = nullable, otherwise vtype) and produce
 * the value loaded through an address that works for all three cases.
 * NOTE(review): some lines are elided in this extraction. */
3902 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3903 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3907 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type check via the castclass-unbox icall. */
3913 args [1] = klass_inst;
3916 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3918 NEW_BBLOCK (cfg, is_ref_bb);
3919 NEW_BBLOCK (cfg, is_nullable_bb);
3920 NEW_BBLOCK (cfg, end_bb);
3921 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3926 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3928 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3929 addr_reg = alloc_dreg (cfg, STACK_MP);
/* vtype case: the value lives right after the MonoObject header. */
3933 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3934 MONO_ADD_INS (cfg->cbb, addr);
3936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3939 MONO_START_BB (cfg, is_ref_bb);
3941 /* Save the ref to a temporary */
3942 dreg = alloc_ireg (cfg);
3943 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3944 addr->dreg = addr_reg;
3945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3946 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3949 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox indirectly through a hand-built
 * one-argument signature (the concrete method can't be constructed here). */
3952 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3953 MonoInst *unbox_call;
3954 MonoMethodSignature *unbox_sig;
3956 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3957 unbox_sig->ret = &klass->byval_arg;
3958 unbox_sig->param_count = 1;
3959 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3960 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3962 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3963 addr->dreg = addr_reg;
3966 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3969 MONO_START_BB (cfg, end_bb);
/* All paths meet here; load the value through the common address. */
3972 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3974 *out_cbb = cfg->cbb;
3980 * Returns NULL and set the cfg exception on error.
3983 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
/* Emit IR that allocates an instance of KLASS, selecting among: a managed
 * (GC-generated) allocator, domain-shared mono_object_new, an mscorlib AOT
 * helper, or mono_object_new_specific / the class allocation function.
 * Returns NULL and sets the cfg exception on error (see comment above).
 * NOTE(review): the context_used/non-shared branch structure and several
 * lines are elided in this extraction. */
3985 MonoInst *iargs [2];
/* --- shared generic code path --- */
3991 MonoInst *iargs [2];
3992 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3994 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED passes a klass (plus domain); otherwise a vtable. */
3996 if (cfg->opt & MONO_OPT_SHARED)
3997 rgctx_info = MONO_RGCTX_INFO_KLASS;
3999 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4000 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4002 if (cfg->opt & MONO_OPT_SHARED) {
4003 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4005 alloc_ftn = mono_object_new;
4008 alloc_ftn = mono_object_new_specific;
/* Prefer the inlined managed allocator when the size is known statically. */
4011 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4012 if (known_instance_size) {
4013 int size = mono_class_instance_size (klass);
4015 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4017 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4020 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4023 if (cfg->opt & MONO_OPT_SHARED) {
4024 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4025 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4027 alloc_ftn = mono_object_new;
4028 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4029 /* This happens often in argument checking code, eg. throw new FooException... */
4030 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4031 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4032 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4034 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4035 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> report a type-load error. */
4039 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4040 cfg->exception_ptr = klass;
4044 #ifndef MONO_CROSS_COMPILE
4045 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4048 if (managed_alloc) {
4049 int size = mono_class_instance_size (klass);
4051 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4052 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4053 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fall back to the runtime allocation function; pass_lw indicates it wants
 * the instance size (in words) as its first argument. */
4055 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4057 guint32 lw = vtable->klass->instance_size;
4058 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4059 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4060 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4063 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4067 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4071 * Returns NULL and set the cfg exception on error.
4074 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
/* Emit the box operation for VAL of type KLASS. Special-cases Nullable<T>
 * (calls the generated Nullable<T>.Box) and gsharedvt classes (run-time
 * branch on box type); otherwise allocates and stores the value.
 * NOTE(review): some if/else lines are elided in this extraction. */
4076 MonoInst *alloc, *ins;
4078 *out_cbb = cfg->cbb;
4080 if (mono_class_is_nullable (klass)) {
4081 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4084 /* FIXME: What if the class is shared? We might not
4085 have to get the method address from the RGCTX. */
/* Shared path: indirect call through an rgctx-provided code address. */
4086 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4087 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4088 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4090 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, possibly passing the vtable. */
4092 gboolean pass_vtable, pass_mrgctx;
4093 MonoInst *rgctx_arg = NULL;
4095 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4096 g_assert (!pass_mrgctx);
4099 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4102 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4105 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: KLASS may instantiate to ref (1), nullable (2) or vtype. */
4109 if (mini_is_gsharedvt_klass (cfg, klass)) {
4110 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4111 MonoInst *res, *is_ref, *src_var, *addr;
4114 dreg = alloc_ireg (cfg);
4116 NEW_BBLOCK (cfg, is_ref_bb);
4117 NEW_BBLOCK (cfg, is_nullable_bb);
4118 NEW_BBLOCK (cfg, end_bb);
4119 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy the value after the object header. */
4127 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4130 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4131 ins->opcode = OP_STOREV_MEMBASE;
4133 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4134 res->type = STACK_OBJ;
4136 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4139 MONO_START_BB (cfg, is_ref_bb);
4141 /* val is a vtype, so has to load the value manually */
4142 src_var = get_vreg_to_inst (cfg, val->dreg);
4144 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4145 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4150 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box through a hand-built signature. */
4153 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4154 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4156 MonoMethodSignature *box_sig;
4159 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4160 * construct that method at JIT time, so have to do things by hand.
4162 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4163 box_sig->ret = &mono_defaults.object_class->byval_arg;
4164 box_sig->param_count = 1;
4165 box_sig->params [0] = &klass->byval_arg;
4166 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4167 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4168 res->type = STACK_OBJ;
4172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4174 MONO_START_BB (cfg, end_bb);
4176 *out_cbb = cfg->cbb;
/* Ordinary (non-nullable, non-gsharedvt) box. */
4180 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4184 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4190 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
/* Return whether KLASS is a generic instance (or an open generic in shared
 * code) with at least one co/contra-variant type argument instantiated with
 * a reference type — such casts need the slow cache-based path.
 * NOTE(review): the early-return for the non-generic case is elided here. */
4193 MonoGenericContainer *container;
4194 MonoGenericInst *ginst;
4196 if (klass->generic_class) {
4197 container = klass->generic_class->container_class->generic_container;
4198 ginst = klass->generic_class->context.class_inst;
4199 } else if (klass->generic_container && context_used) {
4200 container = klass->generic_container;
4201 ginst = container->context.class_inst;
/* Scan each type argument: variant position + reference type => true. */
4206 for (i = 0; i < container->type_argc; ++i) {
4208 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4210 type = ginst->type_argv [i];
4211 if (mini_type_is_reference (cfg, type))
/* Lazily-built whitelist of mscorlib class names whose icalls may be called
 * directly (initialized once below, published with a memory barrier). */
static GHashTable* direct_icall_type_hash;
4220 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
/* Return whether CMETHOD's icall can be invoked with a direct call instead
 * of going through a wrapper. */
4222 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4223 if (!direct_icalls_enabled (cfg))
4227 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4228 * Whitelist a few icalls for now.
4230 if (!direct_icall_type_hash) {
4231 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4233 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4234 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4235 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible. */
4236 mono_memory_barrier ();
4237 direct_icall_type_hash = h;
4240 if (cmethod->klass == mono_defaults.math_class)
4242 /* No locking needed */
4243 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot use the simple inline
 * vtable check (interfaces, arrays, nullables, MarshalByRef, sealed classes,
 * open type variables) and must take the cache/wrapper path instead. */
4248 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4251 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
/* Emit a castclass through the marshal-generated cache wrapper; ARGS are the
 * wrapper arguments (object, klass, cache slot) prepared by the caller.
 * Cast details are recorded around the call for --debug=casts. */
4253 MonoMethod *mono_castclass;
4256 mono_castclass = mono_marshal_get_castclass_with_cache ();
4258 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4259 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4260 reset_cast_details (cfg);
/* The call may have changed the current bblock; report it back. */
4261 *out_bblock = cfg->cbb;
4267 get_castclass_cache_idx (MonoCompile *cfg)
/* Return a fresh per-call-site cache index: method index in the high 16
 * bits, a per-method running counter in the low 16. */
4269 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4270 cfg->castclass_cache_index ++;
4271 return (cfg->method_index << 16) | cfg->castclass_cache_index;
4275 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
/* Non-shared variant: build the (obj, klass, cache) argument triple for the
 * cache-based castclass wrapper and emit the call.
 * NOTE(review): the args[0] assignment line is elided in this extraction. */
4284 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* Cache slot: an AOT patch under AOT, else a domain-allocated pointer. */
4287 if (cfg->compile_aot) {
4288 idx = get_castclass_cache_idx (cfg);
4289 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4291 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4294 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4296 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4300 * Returns NULL and set the cfg exception on error.
4303 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
/* Emit IR for the castclass opcode on SRC against KLASS. Chooses among:
 * the cache-based wrapper (variant generics), the inlined marshal wrapper
 * (interfaces / MarshalByRef), the shared-code cache path, or inline
 * vtable/klass comparisons. Returns NULL and sets the cfg exception on
 * error (see comment above). NOTE(review): several branch/return lines are
 * elided in this extraction. */
4305 MonoBasicBlock *is_null_bb;
4306 int obj_reg = src->dreg;
4307 int vtable_reg = alloc_preg (cfg);
4309 MonoInst *klass_inst = NULL, *res;
4310 MonoBasicBlock *bblock;
4314 context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared + reference-variant generic argument: use the cached wrapper. */
4316 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4317 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4318 (*inline_costs) += 2;
/* Non-shared interface / MarshalByRef: inline the marshal castclass wrapper. */
4321 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4322 MonoMethod *mono_castclass;
4323 MonoInst *iargs [1];
4326 mono_castclass = mono_marshal_get_castclass (klass);
4329 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4330 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4331 iargs, ip, cfg->real_offset, TRUE, &bblock);
4332 reset_cast_details (cfg);
4333 CHECK_CFG_EXCEPTION;
4334 g_assert (costs > 0);
4336 cfg->real_offset += 5;
4338 (*inline_costs) += costs;
/* Shared-code path: complex casts go through the rgctx-provided cache. */
4347 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4348 MonoInst *cache_ins;
4350 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4355 /* klass - it's the second element of the cache entry*/
4356 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4359 args [2] = cache_ins;
4361 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4364 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects pass the cast unconditionally. */
4367 NEW_BBLOCK (cfg, is_null_bb);
4369 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4370 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4372 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4374 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4376 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4378 int klass_reg = alloc_preg (cfg);
4380 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes allow an exact vtable/klass comparison. */
4382 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4383 /* the remoting code is broken, access the class for now */
4384 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4385 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4387 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4388 cfg->exception_ptr = klass;
4391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4394 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4396 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4399 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4403 MONO_START_BB (cfg, is_null_bb);
4405 reset_cast_details (cfg);
4416 * Returns NULL and set the cfg exception on error.
4419 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
/* Emit IR for the isinst opcode: result is SRC when the object is an
 * instance of KLASS (or null), and null otherwise. Complex cases go through
 * the cache-based wrapper; the rest use inline vtable/klass checks.
 * Returns NULL and sets the cfg exception on error (see comment above).
 * NOTE(review): several branch lines are elided in this extraction. */
4422 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4423 int obj_reg = src->dreg;
4424 int vtable_reg = alloc_preg (cfg);
4425 int res_reg = alloc_ireg_ref (cfg);
4426 MonoInst *klass_inst = NULL;
/* Complex casts: call the marshal-generated isinst-with-cache wrapper. */
4431 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4432 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4433 MonoInst *cache_ins;
4435 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4440 /* klass - it's the second element of the cache entry*/
4441 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4444 args [2] = cache_ins;
4446 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4449 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4452 NEW_BBLOCK (cfg, is_null_bb);
4453 NEW_BBLOCK (cfg, false_bb);
4454 NEW_BBLOCK (cfg, end_bb);
4456 /* Do the assignment at the beginning, so the other assignment can be if converted */
4457 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4458 ins->type = STACK_OBJ;
/* null is an instance of anything — jump straight to the success block. */
4461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4466 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4467 g_assert (!context_used);
4468 /* the is_null_bb target simply copies the input register to the output */
4469 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4471 int klass_reg = alloc_preg (cfg);
/* Array case: compare rank, then the element (cast) class. */
4474 int rank_reg = alloc_preg (cfg);
4475 int eclass_reg = alloc_preg (cfg);
4477 g_assert (!context_used);
4478 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4479 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4480 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] / Enum[] / specific-enum[] element classes need special rules
 * because enums are assignment-compatible with their underlying types. */
4483 if (klass->cast_class == mono_defaults.object_class) {
4484 int parent_reg = alloc_preg (cfg);
4485 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4486 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4487 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4488 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4489 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4490 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4491 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4493 } else if (klass->cast_class == mono_defaults.enum_class) {
4494 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4496 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4497 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4499 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4500 /* Check that the object is a vector too */
4501 int bounds_reg = alloc_preg (cfg);
4502 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4504 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4507 /* the is_null_bb target simply copies the input register to the output */
4508 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4510 } else if (mono_class_is_nullable (klass)) {
4511 g_assert (!context_used);
4512 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4513 /* the is_null_bb target simply copies the input register to the output */
4514 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: a single exact vtable/klass comparison suffices. */
4516 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4517 g_assert (!context_used);
4518 /* the remoting code is broken, access the class for now */
4519 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4520 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4522 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4523 cfg->exception_ptr = klass;
4526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4531 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4532 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk via mini_emit_isninst_cast_inst. */
4534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4535 /* the is_null_bb target simply copies the input register to the output */
4536 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result register is set to null. */
4541 MONO_START_BB (cfg, false_bb);
4543 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4546 MONO_START_BB (cfg, is_null_bb);
4548 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit IR for the internal "cisinst" (checked isinst) opcode used by the
 *   remoting-aware isinst decomposition.  Per the comment below: 0 = instance,
 *   1 = not an instance, 2 = transparent proxy whose type can't be decided here.
 * NOTE(review): this extract drops lines (embedded line numbers are
 * non-contiguous), so braces, #else/#endif pairs and some statements are not
 * visible; comments describe only the visible code.
 */
4554 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4556 /* This opcode takes as input an object reference and a class, and returns:
4557 0) if the object is an instance of the class,
4558 1) if the object is not instance of the class,
4559 2) if the object is a proxy whose type cannot be determined */
/* With remoting enabled two extra targets exist: false2_bb (result 2) and
 * no_proxy_bb (plain, non-proxy class check). */
4562 #ifndef DISABLE_REMOTING
4563 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4565 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4567 int obj_reg = src->dreg;
4568 int dreg = alloc_ireg (cfg);
4570 #ifndef DISABLE_REMOTING
4571 int klass_reg = alloc_preg (cfg);
4574 NEW_BBLOCK (cfg, true_bb);
4575 NEW_BBLOCK (cfg, false_bb);
4576 NEW_BBLOCK (cfg, end_bb);
4577 #ifndef DISABLE_REMOTING
4578 NEW_BBLOCK (cfg, false2_bb);
4579 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is "not an instance": branch straight to false_bb (result 1). */
4582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4585 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4586 #ifndef DISABLE_REMOTING
4587 NEW_BBLOCK (cfg, interface_fail_bb);
4590 tmp_reg = alloc_preg (cfg);
4591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4592 #ifndef DISABLE_REMOTING
/* Interface check failed: if the object is a transparent proxy with custom
 * type info, retry the interface cast; otherwise it is result 1 or 2. */
4593 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4594 MONO_START_BB (cfg, interface_fail_bb);
4595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4597 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4599 tmp_reg = alloc_preg (cfg);
4600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4604 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case: distinguish transparent proxies from plain objects. */
4607 #ifndef DISABLE_REMOTING
4608 tmp_reg = alloc_preg (cfg);
4609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4612 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4613 tmp_reg = alloc_preg (cfg);
4614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4617 tmp_reg = alloc_preg (cfg);
4618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4620 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: a failed cast means "undetermined" (false2_bb). */
4622 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4625 MONO_START_BB (cfg, no_proxy_bb);
4627 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4629 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0, all joining end_bb. */
4633 MONO_START_BB (cfg, false_bb);
4635 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4638 #ifndef DISABLE_REMOTING
4639 MONO_START_BB (cfg, false2_bb);
4641 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4645 MONO_START_BB (cfg, true_bb);
4647 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4649 MONO_START_BB (cfg, end_bb);
/* Materialize the result register as an I4 stack value for the caller. */
4652 MONO_INST_NEW (cfg, ins, OP_ICONST);
4654 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for the internal "ccastclass" (checked castclass) opcode, the
 *   remoting-aware castclass decomposition.  Returns 0/1 as documented below,
 *   or throws InvalidCastException.
 * NOTE(review): lines are missing from this extract (embedded numbering is
 * non-contiguous); comments describe only the visible code.
 */
4660 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4662 /* This opcode takes as input an object reference and a class, and returns:
4663 0) if the object is an instance of the class,
4664 1) if the object is a proxy whose type cannot be determined
4665 an InvalidCastException exception is thrown otherwise*/
4668 #ifndef DISABLE_REMOTING
4669 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4671 MonoBasicBlock *ok_result_bb;
4673 int obj_reg = src->dreg;
4674 int dreg = alloc_ireg (cfg);
4675 int tmp_reg = alloc_preg (cfg);
4677 #ifndef DISABLE_REMOTING
4678 int klass_reg = alloc_preg (cfg);
4679 NEW_BBLOCK (cfg, end_bb);
4682 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
4684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so the eventual InvalidCastException carries context. */
4687 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4689 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4690 #ifndef DISABLE_REMOTING
4691 NEW_BBLOCK (cfg, interface_fail_bb);
4693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4694 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4695 MONO_START_BB (cfg, interface_fail_bb);
4696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface cast failed: only a transparent proxy may still pass; anything
 * else throws here. */
4698 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4700 tmp_reg = alloc_preg (cfg);
4701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4703 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 ("cannot be determined"). */
4705 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4706 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4709 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface case (remoting build): split on transparent proxy vs. plain. */
4713 #ifndef DISABLE_REMOTING
4714 NEW_BBLOCK (cfg, no_proxy_bb);
4716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4718 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4720 tmp_reg = alloc_preg (cfg);
4721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4724 tmp_reg = alloc_preg (cfg);
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4729 NEW_BBLOCK (cfg, fail_1_bb);
4731 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4733 MONO_START_BB (cfg, fail_1_bb);
4735 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4736 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4738 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check, throwing on mismatch. */
4740 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4742 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4746 MONO_START_BB (cfg, ok_result_bb);
4748 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4750 #ifndef DISABLE_REMOTING
4751 MONO_START_BB (cfg, end_bb);
/* Materialize the result register as an I4 stack value for the caller. */
4755 MONO_INST_NEW (cfg, ins, OP_ICONST);
4757 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *   Intrinsic expansion of Enum.HasFlag: load the enum value, AND it with the
 *   flag, and compare the result against the flag ((v & f) == f), yielding an
 *   I4 boolean.
 * NOTE(review): the switch body that classifies enum_type->type (and sets
 * is_i4) is missing from this extract — embedded line numbers jump 4772->4784.
 */
4762 static G_GNUC_UNUSED MonoInst*
4763 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4765 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4766 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4769 switch (enum_type->type) {
4772 #if SIZEOF_REGISTER == 8
/* is_i4 selects 32-bit vs. 64-bit opcodes and register classes below. */
4784 MonoInst *load, *and, *cmp, *ceq;
4785 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4786 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4787 int dest_reg = alloc_ireg (cfg);
4789 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4790 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4791 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4792 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4794 ceq->type = STACK_I4;
/* Decompose immediately so later passes see only low-level opcodes. */
4797 load = mono_decompose_opcode (cfg, load, NULL);
4798 and = mono_decompose_opcode (cfg, and, NULL);
4799 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4800 ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 *   store its target/method/method_code/invoke_impl fields, and wire up the
 *   delegate trampoline (AOT patch or eager trampoline creation).
 * NOTE(review): lines are missing from this extract (non-contiguous embedded
 * numbering); comments describe only the visible code.
 */
4808 * Returns NULL and set the cfg exception on error.
4810 static G_GNUC_UNUSED MonoInst*
4811 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4815 gpointer trampoline;
4816 MonoInst *obj, *method_ins, *tramp_ins;
4820 // FIXME reenable optimisation for virtual case
4825 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4828 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4832 obj = handle_alloc (cfg, klass, FALSE, 0);
4836 /* Inline the contents of mono_delegate_ctor */
4838 /* Set target field */
4839 /* Optimize away setting of NULL target */
4840 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4841 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds a managed reference, so the GC write barrier must
 * cover the store when barriers are enabled. */
4842 if (cfg->gen_write_barriers) {
4843 dreg = alloc_preg (cfg);
4844 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4845 emit_write_barrier (cfg, ptr, target);
4849 /* Set method field */
4850 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4854 * To avoid looking up the compiled code belonging to the target method
4855 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4856 * store it, and we fill it after the method has been compiled.
4858 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4859 MonoInst *code_slot_ins;
4862 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method->code-slot hash under the domain lock. */
4864 domain = mono_domain_get ();
4865 mono_domain_lock (domain);
4866 if (!domain_jit_info (domain)->method_code_hash)
4867 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4868 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4870 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4871 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4873 mono_domain_unlock (domain);
4875 if (cfg->compile_aot)
4876 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4878 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Trampoline: AOT emits a patchable constant; JIT creates it eagerly. */
4883 if (cfg->compile_aot) {
4884 MonoDelegateClassMethodPair *del_tramp;
4886 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4887 del_tramp->klass = klass;
4888 del_tramp->method = context_used ? NULL : method;
4889 del_tramp->virtual = virtual;
4890 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4893 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4895 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4896 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4899 /* Set invoke_impl field */
4901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4903 dreg = alloc_preg (cfg);
4904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4905 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4907 dreg = alloc_preg (cfg);
4908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4912 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
4918 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4920 MonoJitICallInfo *info;
4922 /* Need to register the icall so it gets an icall wrapper */
4923 info = mono_get_array_new_va_icall (rank);
4925 cfg->flags |= MONO_CFG_HAS_VARARGS;
4927 /* mono_array_new_va () needs a vararg calling convention */
4928 cfg->disable_llvm = TRUE;
4930 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4931 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4935 * handle_constrained_gsharedvt_call:
4937 * Handle constrained calls where the receiver is a gsharedvt type.
4938 * Return the instruction representing the call. Set the cfg exception on failure.
/* NOTE(review): lines are missing from this extract (non-contiguous embedded
 * numbering); comments describe only the visible code. */
4941 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4942 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4944 MonoInst *ins = NULL;
4945 MonoBasicBlock *bblock = *ref_bblock;
4946 gboolean emit_widen = *ref_emit_widen;
4949 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4950 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4951 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of signatures is supported; anything else falls through
 * to GSHAREDVT_FAILURE below. */
4953 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4954 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4955 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4956 MonoInst *args [16];
4959 * This case handles calls to
4960 * - object:ToString()/Equals()/GetHashCode(),
4961 * - System.IComparable<T>:CompareTo()
4962 * - System.IEquatable<T>:Equals ()
4963 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] = method (through the rgctx when generic context is used),
 * args[2] = constrained class. */
4967 if (mono_method_check_context_used (cmethod))
4968 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4970 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4971 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4973 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4974 if (fsig->hasthis && fsig->param_count) {
4975 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4976 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4977 ins->dreg = alloc_preg (cfg);
4978 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4979 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt parameter: pass its address plus box-type info (args[3]);
 * otherwise store the value directly into the argument array. */
4982 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4985 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4987 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4988 addr_reg = ins->dreg;
4989 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4991 EMIT_NEW_ICONST (cfg, args [3], 0);
4992 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4995 EMIT_NEW_ICONST (cfg, args [3], 0);
4996 EMIT_NEW_ICONST (cfg, args [4], 0);
4998 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it according to the return type. */
5001 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
5002 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
5003 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5007 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5008 MONO_ADD_INS (cfg->cbb, add);
5010 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5011 MONO_ADD_INS (cfg->cbb, ins);
5012 /* ins represents the call result */
5015 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5018 *ref_emit_widen = emit_widen;
5019 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *   Emit an OP_LOAD_GOTADDR at the start of the entry basic block to
 *   materialize the GOT address into cfg->got_var, plus a dummy use in the
 *   exit block to keep the variable alive.  No-op if the got_var does not
 *   exist or was already allocated.
 */
5028 mono_emit_load_got_addr (MonoCompile *cfg)
5030 MonoInst *getaddr, *dummy_use;
5032 if (!cfg->got_var || cfg->got_var_allocated)
5035 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5036 getaddr->cil_code = cfg->header->code;
5037 getaddr->dreg = cfg->got_var->dreg;
5039 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; otherwise a plain
 * append lands it first anyway. */
5040 if (cfg->bb_entry->code) {
5041 getaddr->next = cfg->bb_entry->code;
5042 cfg->bb_entry->code = getaddr;
5045 MONO_ADD_INS (cfg->bb_entry, getaddr);
5047 cfg->got_var_allocated = TRUE;
5050 * Add a dummy use to keep the got_var alive, since real uses might
5051 * only be generated by the back ends.
5052 * Add it to end_bblock, so the variable's lifetime covers the whole
5054 * It would be better to make the usage of the got var explicit in all
5055 * cases when the backend needs it (i.e. calls, throw etc.), so this
5056 * wouldn't be needed.
5058 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5059 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size threshold, read from MONO_INLINELIMIT on first use
 * (see mono_method_check_inlining); defaults to INLINE_LENGTH_LIMIT. */
5062 static int inline_limit;
5063 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the current compilation.
 *   Checks compile flags, recursion depth, method attributes, body size
 *   (against the MONO_INLINELIMIT / INLINE_LENGTH_LIMIT threshold), class
 *   initialization constraints, declarative security, and soft-float
 *   restrictions.
 * NOTE(review): return statements and braces are missing from this extract
 * (non-contiguous embedded numbering); comments describe visible code only.
 */
5066 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5068 MonoMethodHeaderSummary header;
5070 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5071 MonoMethodSignature *sig = mono_method_signature (method);
5075 if (cfg->disable_inline)
5077 if (cfg->generic_sharing_context)
5080 if (cfg->inline_depth > 10)
5083 #ifdef MONO_ARCH_HAVE_LMF_OPS
5084 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5085 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5086 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5091 if (!mono_method_get_header_summary (method, &header))
5094 /*runtime, icall and pinvoke are checked by summary call*/
5095 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5096 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5097 (mono_class_is_marshalbyref (method->klass)) ||
5101 /* also consider num_locals? */
5102 /* Do the size check early to avoid creating vtables */
5103 if (!inline_limit_inited) {
5104 if (g_getenv ("MONO_INLINELIMIT"))
5105 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5107 inline_limit = INLINE_LENGTH_LIMIT;
5108 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5110 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5114 * if we can initialize the class of the method right away, we do,
5115 * otherwise we don't allow inlining if the class needs initialization,
5116 * since it would mean inserting a call to mono_runtime_class_init()
5117 * inside the inlined code
5119 if (!(cfg->opt & MONO_OPT_SHARED)) {
5120 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5121 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5122 vtable = mono_class_vtable (cfg->domain, method->klass);
5125 if (!cfg->compile_aot)
5126 mono_runtime_class_init (vtable);
5127 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5128 if (cfg->run_cctors && method->klass->has_cctor) {
5129 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5130 if (!method->klass->runtime_info)
5131 /* No vtable created yet */
5133 vtable = mono_class_vtable (cfg->domain, method->klass);
5136 /* This makes so that inline cannot trigger */
5137 /* .cctors: too many apps depend on them */
5138 /* running with a specific order... */
5139 if (! vtable->initialized)
5141 mono_runtime_class_init (vtable);
5143 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5144 if (!method->klass->runtime_info)
5145 /* No vtable created yet */
5147 vtable = mono_class_vtable (cfg->domain, method->klass);
5150 if (!vtable->initialized)
5155 * If we're compiling for shared code
5156 * the cctor will need to be run at aot method load time, for example,
5157 * or at the end of the compilation of the inlining method.
5159 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5164 * CAS - do not inline methods with declarative security
5165 * Note: this has to be before any possible return TRUE;
5167 if (mono_security_method_has_declsec (method))
/* Soft-float targets cannot inline methods taking or returning R4. */
5170 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5171 if (mono_arch_is_soft_float ()) {
5173 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5175 for (i = 0; i < sig->param_count; ++i)
5176 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5181 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static-field access in METHOD requires emitting a call
 *   to run KLASS's class constructor first.
 * NOTE(review): the return statements between these conditions are missing
 * from this extract (non-contiguous embedded numbering).
 */
5188 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5190 if (!cfg->compile_aot) {
5192 if (vtable->initialized)
5196 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5197 if (cfg->method == method)
5201 if (!mono_class_needs_cctor_run (klass, method))
5204 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5205 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX in a one-dimensional
 *   array ARR of element type KLASS, with an optional bounds check (BCHECK).
 *   Uses an x86/amd64 LEA fast path for power-of-two element sizes, and an
 *   rgctx lookup of the element size for gsharedvt variable-size classes.
 * NOTE(review): lines are missing from this extract (non-contiguous embedded
 * numbering); comments describe visible code only.
 */
5212 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5216 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5219 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5222 mono_class_init (klass);
5223 size = mono_class_array_element_size (klass);
5226 mult_reg = alloc_preg (cfg);
5227 array_reg = arr->dreg;
5228 index_reg = index->dreg;
5230 #if SIZEOF_REGISTER == 8
5231 /* The array reg is 64 bits but the index reg is only 32 */
5232 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself; otherwise sign-extend the index. */
5234 index2_reg = index_reg;
5236 index2_reg = alloc_preg (cfg);
5237 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5240 if (index->type == STACK_I8) {
5241 index2_reg = alloc_preg (cfg);
5242 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5244 index2_reg = index_reg;
5249 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5251 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can fold the scale+offset into a single LEA. */
5252 if (size == 1 || size == 2 || size == 4 || size == 8) {
5253 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5255 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5256 ins->klass = mono_class_get_element_class (klass);
5257 ins->type = STACK_MP;
5263 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime, fetch it via rgctx. */
5266 MonoInst *rgctx_ins;
5269 g_assert (cfg->generic_sharing_context);
5270 context_used = mini_class_check_context_used (cfg, klass);
5271 g_assert (context_used);
5272 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5273 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5277 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5278 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5279 ins->klass = mono_class_get_element_class (klass);
5280 ins->type = STACK_MP;
5281 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the element address for a rank-2 array: load the bounds
 *   array, range-check both (lower-bound-adjusted) indexes, then compute
 *   ((idx1 * length2) + idx2) * element_size + vector offset.
 *   Only compiled when the target has native multiply (guard above).
 * NOTE(review): lines are missing from this extract (non-contiguous embedded
 * numbering); comments describe visible code only.
 */
5288 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5290 int bounds_reg = alloc_preg (cfg);
5291 int add_reg = alloc_ireg_mp (cfg);
5292 int mult_reg = alloc_preg (cfg);
5293 int mult2_reg = alloc_preg (cfg);
5294 int low1_reg = alloc_preg (cfg);
5295 int low2_reg = alloc_preg (cfg);
5296 int high1_reg = alloc_preg (cfg);
5297 int high2_reg = alloc_preg (cfg);
5298 int realidx1_reg = alloc_preg (cfg);
5299 int realidx2_reg = alloc_preg (cfg);
5300 int sum_reg = alloc_preg (cfg);
5301 int index1, index2, tmpreg;
5305 mono_class_init (klass);
5306 size = mono_class_array_element_size (klass);
5308 index1 = index_ins1->dreg;
5309 index2 = index_ins2->dreg;
5311 #if SIZEOF_REGISTER == 8
5312 /* The array reg is 64 bits but the index reg is only 32 */
5313 if (COMPILE_LLVM (cfg)) {
5316 tmpreg = alloc_preg (cfg);
5317 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5319 tmpreg = alloc_preg (cfg);
5320 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5324 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5328 /* range checking */
5329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5330 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare against
 * length throws IndexOutOfRangeException for both under- and overflow. */
5332 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5333 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5334 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5335 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5336 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5337 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5338 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds entry lives sizeof (MonoArrayBounds) past the first. */
5340 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5341 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5342 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5343 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5344 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5345 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5346 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Address = ((realidx1 * length2) + realidx2) * size + arr + vector offset. */
5348 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5349 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5350 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5351 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5352 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5354 ins->type = STACK_MP;
5356 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Emit the element-address computation for an array accessor method:
 *   rank 1 uses the inline fast path, rank 2 uses the rank-2 fast path when
 *   OP_LMUL is available and intrinsics are enabled, and everything else
 *   falls back to a call to the marshalled Address helper.
 */
5363 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5367 MonoMethod *addr_method;
5369 MonoClass *eclass = cmethod->klass->element_class;
/* Setters carry the value as the last signature parameter; exclude it when
 * deriving the rank from the parameter count. */
5371 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5374 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5376 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5377 /* emit_ldelema_2 depends on OP_LMUL */
5378 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5379 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5383 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* General fallback: call the generated Address helper for this rank/size. */
5386 element_size = mono_class_array_element_size (eclass);
5387 addr_method = mono_marshal_get_array_address (rank, element_size);
5388 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see
 * mono_set_break_policy for how embedders override this). */
5393 static MonoBreakPolicy
5394 always_insert_breakpoint (MonoMethod *method)
5396 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; consulted by should_insert_brekpoint (). */
5399 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5402 * mono_set_break_policy:
5403 * policy_callback: the new callback function
5405 * Allow embedders to decide whether to actually obey breakpoint instructions
5406 * (both break IL instructions and Debugger.Break () method calls), for example
5407 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5408 * untrusted or semi-trusted code.
5410 * @policy_callback will be called every time a break point instruction needs to
5411 * be inserted with the method argument being the method that calls Debugger.Break()
5412 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5413 * if it wants the breakpoint to not be effective in the given method.
5414 * #MONO_BREAK_POLICY_ALWAYS is the default.
5417 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
5419 if (policy_callback)
5420 break_policy_func = policy_callback;
5422 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Ask the installed break policy whether a breakpoint in METHOD should be
 *   honored.  (The misspelled name matches existing call sites elsewhere in
 *   the file; renaming would be a wider change.)
 * NOTE(review): the return statements of the case arms are missing from this
 * extract (non-contiguous embedded numbering).
 */
5426 should_insert_brekpoint (MonoMethod *method) {
5427 switch (break_policy_func (method)) {
5428 case MONO_BREAK_POLICY_ALWAYS:
5430 case MONO_BREAK_POLICY_NEVER:
5432 case MONO_BREAK_POLICY_ON_DBG:
5433 g_warning ("mdb no longer supported");
5436 g_warning ("Incorrect value returned from break policy callback");
5441 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline the Array Get/SetGenericValueImpl icalls as a direct element-address
 *   computation plus a typed load/store.  Bounds checking is the caller's
 *   responsibility (see comment below).
 */
5443 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5445 MonoInst *addr, *store, *load;
5446 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5448 /* the bounds check is already done by the callers */
5449 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set path: copy from the caller's value pointer into the array slot; a
 * write barrier is needed when the element type is a reference. */
5451 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5452 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5453 if (mini_type_is_reference (cfg, fsig->params [2]))
5454 emit_write_barrier (cfg, addr, load);
/* Get path: copy from the array slot out to the caller's value pointer. */
5456 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5457 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5464 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5466 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for storing SP[2] into SP[0][SP[1]].  Reference-element stores
 *   with safety checks go through the virtual stelemref helper (which performs
 *   the array covariance check); other cases compute the element address and
 *   store directly, adding bounds checks and write barriers as needed.
 * NOTE(review): some lines are missing from this extract (non-contiguous
 * embedded numbering); comments describe visible code only.
 */
5470 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference needs the covariance-checking helper; a null
 * store can never violate covariance, so it takes the direct path. */
5472 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5473 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5474 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5475 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5476 MonoInst *iargs [3];
5479 mono_class_setup_vtable (obj_array);
5480 g_assert (helper->slot);
5482 if (sp [0]->type != STACK_OBJ)
5484 if (sp [2]->type != STACK_OBJ)
5491 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5495 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5498 // FIXME-VT: OP_ICONST optimization
5499 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5500 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5501 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset and store directly off the array reg. */
5502 } else if (sp [1]->opcode == OP_ICONST) {
5503 int array_reg = sp [0]->dreg;
5504 int index_reg = sp [1]->dreg;
5505 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5508 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5509 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5511 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5512 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5513 if (generic_class_is_reference_type (cfg, klass))
5514 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Intrinsic for the UnsafeLoad/UnsafeStore array helpers: resolve the
 *   element class from the signature, then emit a store (via emit_array_store
 *   without safety checks) or an unchecked element load.
 * NOTE(review): the if/else structure lines are missing from this extract
 * (non-contiguous embedded numbering).
 */
5521 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Store path takes the element type from params[2]; load path from the
 * return type. */
5526 eklass = mono_class_from_mono_type (fsig->params [2]);
5528 eklass = mono_class_from_mono_type (fsig->ret);
5531 return emit_array_store (cfg, eklass, args, FALSE);
5533 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5534 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Return whether Array::UnsafeMov may reinterpret a value of
 * PARAM_KLASS as RETURN_KLASS: both must be valuetypes, neither may
 * contain GC references, they must be both structs or both
 * primitives/enums, neither may be floating point, and they must have
 * the same size.
 * NOTE(review): blank/brace/return lines were dropped from this chunk
 * during extraction; the only code change here is repairing the
 * mis-encoded "&param_klass", which had been garbled into a pilcrow
 * ("¶m_klass") on the two MONO_TYPE_ISSTRUCT lines.
 */
5540 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5544 //Only allow for valuetypes
5545 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reference-carrying structs cannot be reinterpreted under a moving GC. */
5549 if (param_klass->has_references || return_klass->has_references)
5552 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5553 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5554 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floating point values live in different registers; disallow. */
5557 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5558 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5561 //And have the same size
5562 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Emit IR for the Array::UnsafeMov intrinsic, which reinterprets its
 * single argument as the return type when is_unsafe_mov_compatible ()
 * allows it, either directly for valuetypes or element-wise-compatible
 * rank-1 arrays of such valuetypes.
 * NOTE(review): brace/return lines were dropped from this chunk during
 * extraction; only comments were added here.
 */
5568 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5570 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5571 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5573 //Valuetypes that are semantically equivalent
5574 if (is_unsafe_mov_compatible (param_klass, return_klass))
5577 //Arrays of valuetypes that are semantically equivalent
5578 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Emit specialized IR for a constructor call, or return NULL so the
 * caller emits a regular call.  Tries SIMD intrinsics first (when the
 * architecture supports them and MONO_OPT_SIMD is enabled), then falls
 * through to the native-types intrinsics.
 * NOTE(review): blank/brace lines were dropped from this chunk during
 * extraction; only comments were added here.
 */
5585 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5587 #ifdef MONO_ARCH_SIMD_INTRINSICS
5588 MonoInst *ins = NULL;
/* Only attempt SIMD expansion when the optimization is enabled. */
5590 if (cfg->opt & MONO_OPT_SIMD) {
5591 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5597 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction to the current basic block.
 * KIND is one of the MONO_MEMORY_BARRIER_* constants (callers in this
 * file pass ACQ, REL or SEQ); it is stored in the instruction's backend
 * data for the architecture backend to interpret.
 */
5601 emit_memory_barrier (MonoCompile *cfg, int kind)
5603 MonoInst *ins = NULL;
5604 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5605 MONO_ADD_INS (cfg->cbb, ins);
5606 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsics that only the LLVM backend can lower: Math.Sin/
 * Cos/Sqrt/Abs(double) as unary FP opcodes, and integer Math.Min/Max
 * as cmov-style opcodes when MONO_OPT_CMOV is enabled.  Returns NULL
 * when no intrinsic applies.
 * NOTE(review): opcode-assignment and brace lines were dropped from
 * this chunk during extraction; only comments were added here.
 */
5612 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5614 MonoInst *ins = NULL;
5617 /* The LLVM backend supports these intrinsics */
5618 if (cmethod->klass == mono_defaults.math_class) {
5619 if (strcmp (cmethod->name, "Sin") == 0) {
5621 } else if (strcmp (cmethod->name, "Cos") == 0) {
5623 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for doubles. */
5625 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one argument in, R8 result in a fresh freg. */
5629 if (opcode && fsig->param_count == 1) {
5630 MONO_INST_NEW (cfg, ins, opcode);
5631 ins->type = STACK_R8;
5632 ins->dreg = mono_alloc_freg (cfg);
5633 ins->sreg1 = args [0]->dreg;
5634 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max need conditional-move support. */
5638 if (cfg->opt & MONO_OPT_CMOV) {
5639 if (strcmp (cmethod->name, "Min") == 0) {
5640 if (fsig->params [0]->type == MONO_TYPE_I4)
5642 if (fsig->params [0]->type == MONO_TYPE_U4)
5643 opcode = OP_IMIN_UN;
5644 else if (fsig->params [0]->type == MONO_TYPE_I8)
5646 else if (fsig->params [0]->type == MONO_TYPE_U8)
5647 opcode = OP_LMIN_UN;
5648 } else if (strcmp (cmethod->name, "Max") == 0) {
5649 if (fsig->params [0]->type == MONO_TYPE_I4)
5651 if (fsig->params [0]->type == MONO_TYPE_U4)
5652 opcode = OP_IMAX_UN;
5653 else if (fsig->params [0]->type == MONO_TYPE_I8)
5655 else if (fsig->params [0]->type == MONO_TYPE_U8)
5656 opcode = OP_LMAX_UN;
/* Binary Min/Max intrinsic: stack type follows the I4/I8 operand width. */
5660 if (opcode && fsig->param_count == 2) {
5661 MONO_INST_NEW (cfg, ins, opcode);
5662 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5663 ins->dreg = mono_alloc_ireg (cfg);
5664 ins->sreg1 = args [0]->dreg;
5665 ins->sreg2 = args [1]->dreg;
5666 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Emit IR for intrinsics which are valid in shared (generic-sharing)
 * code: the Array::UnsafeStore/UnsafeLoad/UnsafeMov helpers.  Returns
 * NULL when CMETHOD is not one of them.
 */
5674 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5676 if (cmethod->klass == mono_defaults.array_class) {
5677 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5678 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5679 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5680 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5681 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5682 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Emit specialized IR (an intrinsic) for a call to CMETHOD with
 * signature FSIG and argument instructions ARGS, or return NULL so the
 * caller emits a regular call.  Recognizes methods of System.String,
 * System.Object, System.Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Volatile, Debugger, Environment, System.Math and the
 * Xamarin ObjC Selector class, then falls back to SIMD, native-types,
 * LLVM-only and per-architecture intrinsics.
 * NOTE(review): blank/brace/#else/#endif lines were dropped from this
 * chunk during extraction; the code text is otherwise unmodified
 * except for the Selector::GetHandle fix noted inline below.
 */
5689 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5691 MonoInst *ins = NULL;
/* Lazily cached; safe because the resolved class never changes. */
5693 static MonoClass *runtime_helpers_class = NULL;
5694 if (! runtime_helpers_class)
5695 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5696 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics: get_Chars, get_Length, InternalSetChar --- */
5698 if (cmethod->klass == mono_defaults.string_class) {
5699 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5700 int dreg = alloc_ireg (cfg);
5701 int index_reg = alloc_preg (cfg);
5702 int add_reg = alloc_preg (cfg);
5704 #if SIZEOF_REGISTER == 8
5705 /* The array reg is 64 bits but the index reg is only 32 */
5706 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5708 index_reg = args [1]->dreg;
5710 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5712 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + chars-offset into a single LEA. */
5713 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5714 add_reg = ins->dreg;
5715 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: address = string + index*2, then a 16-bit load. */
5718 int mult_reg = alloc_preg (cfg);
5719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5720 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5721 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5722 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5724 type_from_op (cfg, ins, NULL, NULL);
5726 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5727 int dreg = alloc_ireg (cfg);
5728 /* Decompose later to allow more optimizations */
5729 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5730 ins->type = STACK_I4;
5731 ins->flags |= MONO_INST_FAULT;
5732 cfg->cbb->has_array_access = TRUE;
5733 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5736 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5737 int mult_reg = alloc_preg (cfg);
5738 int add_reg = alloc_preg (cfg);
5740 /* The corlib functions check for oob already. */
5741 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5742 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5744 return cfg->cbb->last_ins;
/* --- System.Object intrinsics: GetType, InternalGetHashCode, empty ctor --- */
5747 } else if (cmethod->klass == mono_defaults.object_class) {
5749 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5750 int dreg = alloc_ireg_ref (cfg);
5751 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type, with a faulting load for the null check. */
5752 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5753 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5754 type_from_op (cfg, ins, NULL, NULL);
5757 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash is only valid when the GC does not move objects. */
5758 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5759 int dreg = alloc_ireg (cfg);
5760 int t1 = alloc_ireg (cfg);
/* (addr << 3) * 2654435761: Knuth-style multiplicative hashing of the address. */
5762 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5763 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5764 ins->type = STACK_I4;
/* Object..ctor () is empty; emit a NOP. */
5768 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5769 MONO_INST_NEW (cfg, ins, OP_NOP);
5770 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5774 } else if (cmethod->klass == mono_defaults.array_class) {
5775 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5776 return emit_array_generic_access (cfg, fsig, args, FALSE);
5777 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5778 return emit_array_generic_access (cfg, fsig, args, TRUE);
5780 #ifndef MONO_BIG_ARRAYS
5782 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5785 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5786 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5787 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5788 int dreg = alloc_ireg (cfg);
5789 int bounds_reg = alloc_ireg_mp (cfg);
5790 MonoBasicBlock *end_bb, *szarray_bb;
5791 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5793 NEW_BBLOCK (cfg, end_bb);
5794 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means a single-dimension zero-based (sz) array. */
5796 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5797 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5800 /* Non-szarray case */
5802 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5803 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5805 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5806 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5807 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5808 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is 0. */
5811 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5812 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5814 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5815 MONO_START_BB (cfg, end_bb);
5817 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5818 ins->type = STACK_I4;
/* Quick reject: the remaining intrinsics are all getters. */
5824 if (cmethod->name [0] != 'g')
5827 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5828 int dreg = alloc_ireg (cfg);
5829 int vtable_reg = alloc_preg (cfg);
5830 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5831 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5832 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5833 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5834 type_from_op (cfg, ins, NULL, NULL);
5837 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5838 int dreg = alloc_ireg (cfg);
5840 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5841 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5842 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5847 } else if (cmethod->klass == runtime_helpers_class) {
5849 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5850 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5854 } else if (cmethod->klass == mono_defaults.thread_class) {
5855 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5856 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5857 MONO_ADD_INS (cfg->cbb, ins);
5859 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5860 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: plain load of the right width + acquire barrier. */
5861 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5863 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5865 if (fsig->params [0]->type == MONO_TYPE_I1)
5866 opcode = OP_LOADI1_MEMBASE;
5867 else if (fsig->params [0]->type == MONO_TYPE_U1)
5868 opcode = OP_LOADU1_MEMBASE;
5869 else if (fsig->params [0]->type == MONO_TYPE_I2)
5870 opcode = OP_LOADI2_MEMBASE;
5871 else if (fsig->params [0]->type == MONO_TYPE_U2)
5872 opcode = OP_LOADU2_MEMBASE;
5873 else if (fsig->params [0]->type == MONO_TYPE_I4)
5874 opcode = OP_LOADI4_MEMBASE;
5875 else if (fsig->params [0]->type == MONO_TYPE_U4)
5876 opcode = OP_LOADU4_MEMBASE;
5877 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5878 opcode = OP_LOADI8_MEMBASE;
5879 else if (fsig->params [0]->type == MONO_TYPE_R4)
5880 opcode = OP_LOADR4_MEMBASE;
5881 else if (fsig->params [0]->type == MONO_TYPE_R8)
5882 opcode = OP_LOADR8_MEMBASE;
5883 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5884 opcode = OP_LOAD_MEMBASE;
5887 MONO_INST_NEW (cfg, ins, opcode);
5888 ins->inst_basereg = args [0]->dreg;
5889 ins->inst_offset = 0;
5890 MONO_ADD_INS (cfg->cbb, ins);
/* Pick the destination register class / stack type from the CIL type. */
5892 switch (fsig->params [0]->type) {
5899 ins->dreg = mono_alloc_ireg (cfg);
5900 ins->type = STACK_I4;
5904 ins->dreg = mono_alloc_lreg (cfg);
5905 ins->type = STACK_I8;
5909 ins->dreg = mono_alloc_ireg (cfg);
5910 #if SIZEOF_REGISTER == 8
5911 ins->type = STACK_I8;
5913 ins->type = STACK_I4;
5918 ins->dreg = mono_alloc_freg (cfg);
5919 ins->type = STACK_R8;
5922 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5923 ins->dreg = mono_alloc_ireg_ref (cfg);
5924 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition on 32-bit targets. */
5928 if (opcode == OP_LOADI8_MEMBASE)
5929 ins = mono_decompose_opcode (cfg, ins, NULL);
5931 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: release barrier + plain store of the right width. */
5935 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5937 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5939 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5940 opcode = OP_STOREI1_MEMBASE_REG;
5941 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5942 opcode = OP_STOREI2_MEMBASE_REG;
5943 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5944 opcode = OP_STOREI4_MEMBASE_REG;
5945 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5946 opcode = OP_STOREI8_MEMBASE_REG;
5947 else if (fsig->params [0]->type == MONO_TYPE_R4)
5948 opcode = OP_STORER4_MEMBASE_REG;
5949 else if (fsig->params [0]->type == MONO_TYPE_R8)
5950 opcode = OP_STORER8_MEMBASE_REG;
5951 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5952 opcode = OP_STORE_MEMBASE_REG;
5955 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5957 MONO_INST_NEW (cfg, ins, opcode);
5958 ins->sreg1 = args [1]->dreg;
5959 ins->inst_destbasereg = args [0]->dreg;
5960 ins->inst_offset = 0;
5961 MONO_ADD_INS (cfg->cbb, ins);
5963 if (opcode == OP_STOREI8_MEMBASE_REG)
5964 ins = mono_decompose_opcode (cfg, ins, NULL);
/* --- System.Threading.Monitor fastpaths (arch-specific register convention) --- */
5969 } else if (cmethod->klass == mono_defaults.monitor_class) {
5970 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5971 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5974 if (COMPILE_LLVM (cfg)) {
5976 * Pass the argument normally, the LLVM backend will handle the
5977 * calling convention problems.
5979 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5981 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5982 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5983 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5984 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5987 return (MonoInst*)call;
5988 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Enter (object, ref bool lockTaken) — the 4.0 overload. */
5989 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5992 if (COMPILE_LLVM (cfg)) {
5994 * Pass the argument normally, the LLVM backend will handle the
5995 * calling convention problems.
5997 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5999 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
6000 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6001 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6002 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
6005 return (MonoInst*)call;
6007 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
6010 if (COMPILE_LLVM (cfg)) {
6011 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6013 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6014 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6015 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6016 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6019 return (MonoInst*)call;
/* --- System.Threading.Interlocked --- */
6022 } else if (cmethod->klass->image == mono_defaults.corlib &&
6023 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6024 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6027 #if SIZEOF_REGISTER == 8
6028 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6029 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6030 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6031 ins->dreg = mono_alloc_preg (cfg);
6032 ins->sreg1 = args [0]->dreg;
6033 ins->type = STACK_I8;
6034 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6035 MONO_ADD_INS (cfg->cbb, ins);
6039 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6041 /* 64 bit reads are already atomic */
6042 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6043 load_ins->dreg = mono_alloc_preg (cfg);
6044 load_ins->inst_basereg = args [0]->dreg;
6045 load_ins->inst_offset = 0;
6046 load_ins->type = STACK_I8;
6047 MONO_ADD_INS (cfg->cbb, load_ins);
6049 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement lower to atomic-add with a constant +/-1. */
6056 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6057 MonoInst *ins_iconst;
6060 if (fsig->params [0]->type == MONO_TYPE_I4) {
6061 opcode = OP_ATOMIC_ADD_I4;
6062 cfg->has_atomic_add_i4 = TRUE;
6064 #if SIZEOF_REGISTER == 8
6065 else if (fsig->params [0]->type == MONO_TYPE_I8)
6066 opcode = OP_ATOMIC_ADD_I8;
6069 if (!mono_arch_opcode_supported (opcode))
6071 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6072 ins_iconst->inst_c0 = 1;
6073 ins_iconst->dreg = mono_alloc_ireg (cfg);
6074 MONO_ADD_INS (cfg->cbb, ins_iconst);
6076 MONO_INST_NEW (cfg, ins, opcode);
6077 ins->dreg = mono_alloc_ireg (cfg);
6078 ins->inst_basereg = args [0]->dreg;
6079 ins->inst_offset = 0;
6080 ins->sreg2 = ins_iconst->dreg;
6081 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6082 MONO_ADD_INS (cfg->cbb, ins);
6084 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6085 MonoInst *ins_iconst;
6088 if (fsig->params [0]->type == MONO_TYPE_I4) {
6089 opcode = OP_ATOMIC_ADD_I4;
6090 cfg->has_atomic_add_i4 = TRUE;
6092 #if SIZEOF_REGISTER == 8
6093 else if (fsig->params [0]->type == MONO_TYPE_I8)
6094 opcode = OP_ATOMIC_ADD_I8;
6097 if (!mono_arch_opcode_supported (opcode))
6099 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6100 ins_iconst->inst_c0 = -1;
6101 ins_iconst->dreg = mono_alloc_ireg (cfg);
6102 MONO_ADD_INS (cfg->cbb, ins_iconst);
6104 MONO_INST_NEW (cfg, ins, opcode);
6105 ins->dreg = mono_alloc_ireg (cfg);
6106 ins->inst_basereg = args [0]->dreg;
6107 ins->inst_offset = 0;
6108 ins->sreg2 = ins_iconst->dreg;
6109 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6110 MONO_ADD_INS (cfg->cbb, ins);
6112 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6115 if (fsig->params [0]->type == MONO_TYPE_I4) {
6116 opcode = OP_ATOMIC_ADD_I4;
6117 cfg->has_atomic_add_i4 = TRUE;
6119 #if SIZEOF_REGISTER == 8
6120 else if (fsig->params [0]->type == MONO_TYPE_I8)
6121 opcode = OP_ATOMIC_ADD_I8;
6124 if (!mono_arch_opcode_supported (opcode))
6126 MONO_INST_NEW (cfg, ins, opcode);
6127 ins->dreg = mono_alloc_ireg (cfg);
6128 ins->inst_basereg = args [0]->dreg;
6129 ins->inst_offset = 0;
6130 ins->sreg2 = args [1]->dreg;
6131 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6132 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: float variants go through int registers via F_TO_I/I_TO_F moves. */
6135 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6136 MonoInst *f2i = NULL, *i2f;
6137 guint32 opcode, f2i_opcode, i2f_opcode;
6138 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6139 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6141 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6142 fsig->params [0]->type == MONO_TYPE_R4) {
6143 opcode = OP_ATOMIC_EXCHANGE_I4;
6144 f2i_opcode = OP_MOVE_F_TO_I4;
6145 i2f_opcode = OP_MOVE_I4_TO_F;
6146 cfg->has_atomic_exchange_i4 = TRUE;
6148 #if SIZEOF_REGISTER == 8
6150 fsig->params [0]->type == MONO_TYPE_I8 ||
6151 fsig->params [0]->type == MONO_TYPE_R8 ||
6152 fsig->params [0]->type == MONO_TYPE_I) {
6153 opcode = OP_ATOMIC_EXCHANGE_I8;
6154 f2i_opcode = OP_MOVE_F_TO_I8;
6155 i2f_opcode = OP_MOVE_I8_TO_F;
6158 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6159 opcode = OP_ATOMIC_EXCHANGE_I4;
6160 cfg->has_atomic_exchange_i4 = TRUE;
6166 if (!mono_arch_opcode_supported (opcode))
6170 /* TODO: Decompose these opcodes instead of bailing here. */
6171 if (COMPILE_SOFT_FLOAT (cfg))
6174 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6175 f2i->dreg = mono_alloc_ireg (cfg);
6176 f2i->sreg1 = args [1]->dreg;
6177 if (f2i_opcode == OP_MOVE_F_TO_I4)
6178 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6179 MONO_ADD_INS (cfg->cbb, f2i);
6182 MONO_INST_NEW (cfg, ins, opcode);
6183 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6184 ins->inst_basereg = args [0]->dreg;
6185 ins->inst_offset = 0;
6186 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6187 MONO_ADD_INS (cfg->cbb, ins);
6189 switch (fsig->params [0]->type) {
6191 ins->type = STACK_I4;
6194 ins->type = STACK_I8;
6197 #if SIZEOF_REGISTER == 8
6198 ins->type = STACK_I8;
6200 ins->type = STACK_I4;
6205 ins->type = STACK_R8;
6208 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6209 ins->type = STACK_OBJ;
6214 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6215 i2f->dreg = mono_alloc_freg (cfg);
6216 i2f->sreg1 = ins->dreg;
6217 i2f->type = STACK_R8;
6218 if (i2f_opcode == OP_MOVE_I4_TO_F)
6219 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6220 MONO_ADD_INS (cfg->cbb, i2f);
/* Storing a reference needs a write barrier for the GC. */
6225 if (cfg->gen_write_barriers && is_ref)
6226 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (location, value, comparand). */
6228 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6229 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6230 guint32 opcode, f2i_opcode, i2f_opcode;
6231 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6232 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6234 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6235 fsig->params [1]->type == MONO_TYPE_R4) {
6236 opcode = OP_ATOMIC_CAS_I4;
6237 f2i_opcode = OP_MOVE_F_TO_I4;
6238 i2f_opcode = OP_MOVE_I4_TO_F;
6239 cfg->has_atomic_cas_i4 = TRUE;
6241 #if SIZEOF_REGISTER == 8
6243 fsig->params [1]->type == MONO_TYPE_I8 ||
6244 fsig->params [1]->type == MONO_TYPE_R8 ||
6245 fsig->params [1]->type == MONO_TYPE_I) {
6246 opcode = OP_ATOMIC_CAS_I8;
6247 f2i_opcode = OP_MOVE_F_TO_I8;
6248 i2f_opcode = OP_MOVE_I8_TO_F;
6251 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6252 opcode = OP_ATOMIC_CAS_I4;
6253 cfg->has_atomic_cas_i4 = TRUE;
6259 if (!mono_arch_opcode_supported (opcode))
6263 /* TODO: Decompose these opcodes instead of bailing here. */
6264 if (COMPILE_SOFT_FLOAT (cfg))
6267 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6268 f2i_new->dreg = mono_alloc_ireg (cfg);
6269 f2i_new->sreg1 = args [1]->dreg;
6270 if (f2i_opcode == OP_MOVE_F_TO_I4)
6271 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6272 MONO_ADD_INS (cfg->cbb, f2i_new);
6274 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6275 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6276 f2i_cmp->sreg1 = args [2]->dreg;
6277 if (f2i_opcode == OP_MOVE_F_TO_I4)
6278 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6279 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6282 MONO_INST_NEW (cfg, ins, opcode);
6283 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6284 ins->sreg1 = args [0]->dreg;
6285 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6286 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6287 MONO_ADD_INS (cfg->cbb, ins);
6289 switch (fsig->params [1]->type) {
6291 ins->type = STACK_I4;
6294 ins->type = STACK_I8;
6297 #if SIZEOF_REGISTER == 8
6298 ins->type = STACK_I8;
6300 ins->type = STACK_I4;
6305 ins->type = STACK_R8;
6308 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6309 ins->type = STACK_OBJ;
6314 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6315 i2f->dreg = mono_alloc_freg (cfg);
6316 i2f->sreg1 = ins->dreg;
6317 i2f->type = STACK_R8;
6318 if (i2f_opcode == OP_MOVE_I4_TO_F)
6319 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6320 MONO_ADD_INS (cfg->cbb, i2f);
6325 if (cfg->gen_write_barriers && is_ref)
6326 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (location, value, comparand, out bool success) — int32 only. */
6328 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6329 fsig->params [1]->type == MONO_TYPE_I4) {
6330 MonoInst *cmp, *ceq;
6332 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6335 /* int32 r = CAS (location, value, comparand); */
6336 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6337 ins->dreg = alloc_ireg (cfg);
6338 ins->sreg1 = args [0]->dreg;
6339 ins->sreg2 = args [1]->dreg;
6340 ins->sreg3 = args [2]->dreg;
6341 ins->type = STACK_I4;
6342 MONO_ADD_INS (cfg->cbb, ins);
6344 /* bool result = r == comparand; */
6345 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6346 cmp->sreg1 = ins->dreg;
6347 cmp->sreg2 = args [2]->dreg;
6348 cmp->type = STACK_I4;
6349 MONO_ADD_INS (cfg->cbb, cmp);
6351 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6352 ceq->dreg = alloc_ireg (cfg);
6353 ceq->type = STACK_I4;
6354 MONO_ADD_INS (cfg->cbb, ceq);
6356 /* *success = result; */
6357 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6359 cfg->has_atomic_cas_i4 = TRUE;
6361 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6362 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6366 } else if (cmethod->klass->image == mono_defaults.corlib &&
6367 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6368 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6371 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6373 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6374 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6376 if (fsig->params [0]->type == MONO_TYPE_I1)
6377 opcode = OP_ATOMIC_LOAD_I1;
6378 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6379 opcode = OP_ATOMIC_LOAD_U1;
6380 else if (fsig->params [0]->type == MONO_TYPE_I2)
6381 opcode = OP_ATOMIC_LOAD_I2;
6382 else if (fsig->params [0]->type == MONO_TYPE_U2)
6383 opcode = OP_ATOMIC_LOAD_U2;
6384 else if (fsig->params [0]->type == MONO_TYPE_I4)
6385 opcode = OP_ATOMIC_LOAD_I4;
6386 else if (fsig->params [0]->type == MONO_TYPE_U4)
6387 opcode = OP_ATOMIC_LOAD_U4;
6388 else if (fsig->params [0]->type == MONO_TYPE_R4)
6389 opcode = OP_ATOMIC_LOAD_R4;
6390 else if (fsig->params [0]->type == MONO_TYPE_R8)
6391 opcode = OP_ATOMIC_LOAD_R8;
6392 #if SIZEOF_REGISTER == 8
6393 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6394 opcode = OP_ATOMIC_LOAD_I8;
6395 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6396 opcode = OP_ATOMIC_LOAD_U8;
6398 else if (fsig->params [0]->type == MONO_TYPE_I)
6399 opcode = OP_ATOMIC_LOAD_I4;
6400 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6401 opcode = OP_ATOMIC_LOAD_U4;
6405 if (!mono_arch_opcode_supported (opcode))
6408 MONO_INST_NEW (cfg, ins, opcode);
6409 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6410 ins->sreg1 = args [0]->dreg;
6411 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6412 MONO_ADD_INS (cfg->cbb, ins);
6414 switch (fsig->params [0]->type) {
6415 case MONO_TYPE_BOOLEAN:
6422 ins->type = STACK_I4;
6426 ins->type = STACK_I8;
6430 #if SIZEOF_REGISTER == 8
6431 ins->type = STACK_I8;
6433 ins->type = STACK_I4;
6438 ins->type = STACK_R8;
6441 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6442 ins->type = STACK_OBJ;
6448 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6450 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6452 if (fsig->params [0]->type == MONO_TYPE_I1)
6453 opcode = OP_ATOMIC_STORE_I1;
6454 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6455 opcode = OP_ATOMIC_STORE_U1;
6456 else if (fsig->params [0]->type == MONO_TYPE_I2)
6457 opcode = OP_ATOMIC_STORE_I2;
6458 else if (fsig->params [0]->type == MONO_TYPE_U2)
6459 opcode = OP_ATOMIC_STORE_U2;
6460 else if (fsig->params [0]->type == MONO_TYPE_I4)
6461 opcode = OP_ATOMIC_STORE_I4;
6462 else if (fsig->params [0]->type == MONO_TYPE_U4)
6463 opcode = OP_ATOMIC_STORE_U4;
6464 else if (fsig->params [0]->type == MONO_TYPE_R4)
6465 opcode = OP_ATOMIC_STORE_R4;
6466 else if (fsig->params [0]->type == MONO_TYPE_R8)
6467 opcode = OP_ATOMIC_STORE_R8;
6468 #if SIZEOF_REGISTER == 8
6469 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6470 opcode = OP_ATOMIC_STORE_I8;
6471 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6472 opcode = OP_ATOMIC_STORE_U8;
6474 else if (fsig->params [0]->type == MONO_TYPE_I)
6475 opcode = OP_ATOMIC_STORE_I4;
6476 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6477 opcode = OP_ATOMIC_STORE_U4;
6481 if (!mono_arch_opcode_supported (opcode))
6484 MONO_INST_NEW (cfg, ins, opcode);
6485 ins->dreg = args [0]->dreg;
6486 ins->sreg1 = args [1]->dreg;
6487 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6488 MONO_ADD_INS (cfg->cbb, ins);
6490 if (cfg->gen_write_barriers && is_ref)
6491 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger --- */
6497 } else if (cmethod->klass->image == mono_defaults.corlib &&
6498 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6499 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6500 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* (sic: the helper is named 'should_insert_brekpoint'; presumably defined
 * earlier in this file — do not "fix" the spelling here.) */
6501 if (should_insert_brekpoint (cfg->method)) {
6502 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6504 MONO_INST_NEW (cfg, ins, OP_NOP);
6505 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment --- */
6509 } else if (cmethod->klass->image == mono_defaults.corlib &&
6510 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6511 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6512 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6514 EMIT_NEW_ICONST (cfg, ins, 1);
6516 EMIT_NEW_ICONST (cfg, ins, 0);
6519 } else if (cmethod->klass == mono_defaults.math_class) {
6521 * There is general branchless code for Min/Max, but it does not work for
6523 * http://everything2.com/?node_id=1051618
/* --- Xamarin ObjC runtime: intrinsify Selector.GetHandle ("literal") --- */
6525 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6526 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6527 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6528 !strcmp (cmethod->klass->name, "Selector")) {
6529 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* FIX: this originally compared cmethod->klass->name against "GetHandle".
 * The enclosing branch already requires klass->name == "Selector", so that
 * comparison could never match and the intrinsic was dead code; compare
 * the method name instead. */
6530 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6531 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6534 MonoJumpInfoToken *ji;
6537 cfg->disable_llvm = TRUE;
6539 if (args [0]->opcode == OP_GOT_ENTRY) {
6540 pi = args [0]->inst_p1;
6541 g_assert (pi->opcode == OP_PATCH_INFO);
6542 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6545 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6546 ji = args [0]->inst_p0;
/* The ldstr argument is replaced by the selector lookup. */
6549 NULLIFY_INS (args [0]);
6552 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6553 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6554 ins->dreg = mono_alloc_ireg (cfg);
6556 ins->inst_p0 = mono_string_to_utf8 (s);
6557 MONO_ADD_INS (cfg->cbb, ins);
/* --- Fallbacks: SIMD, native-types, LLVM-only, then per-arch intrinsics --- */
6563 #ifdef MONO_ARCH_SIMD_INTRINSICS
6564 if (cfg->opt & MONO_OPT_SIMD) {
6565 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6571 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6575 if (COMPILE_LLVM (cfg)) {
6576 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6581 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6585 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a different implementation, or return
 * NULL to leave it alone.  Currently only redirects String::
 * InternalAllocateStr to the GC's managed allocator, which is skipped
 * when allocation profiling or shared code is enabled.
 * NOTE(review): brace/fallback lines were dropped from this chunk
 * during extraction; only comments were changed here.
 */
6588 inline static MonoInst*
6589 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6590 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6592 if (method->klass == mono_defaults.string_class) {
6593 /* managed string allocation support */
6594 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6595 MonoInst *iargs [2];
6596 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6597 MonoMethod *managed_alloc = NULL;
6599 g_assert (vtable); /* Should not fail since it is System.String */
6600 #ifndef MONO_CROSS_COMPILE
6601 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6605 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6606 iargs [1] = args [0];
6607 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created local variables and
 * make cfg->args point at them; used when setting up an inlined body.
 * The implicit 'this' argument (when sig->hasthis) takes its type from the
 * evaluation-stack entry, since the static signature has no slot for it.
 */
6614 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6616 MonoInst *store, *temp;
6619 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6620 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6623 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6624 * would be different than the MonoInst's used to represent arguments, and
6625 * the ldelema implementation can't deal with that.
6626 * Solution: When ldelema is used on an inline argument, create a var for
6627 * it, emit ldelema on that var, and emit the saving code below in
6628 * inline_method () if needed.
6630 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6631 cfg->args [i] = temp;
6632 /* This uses cfg->args [i] which is set by the preceding line */
6633 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6634 store->cil_code = sp [0]->cil_code;
6639 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6640 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6642 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: restrict inlining to callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. The limit string is read once and cached in a static.
 */
6644 check_inline_called_method_name_limit (MonoMethod *called_method)
6647 static const char *limit = NULL;
6649 if (limit == NULL) {
6650 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6652 if (limit_string != NULL)
6653 limit = limit_string;
6658 if (limit [0] != '\0') {
6659 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: succeeds only when the name starts with the limit. */
6661 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6662 g_free (called_method_name);
6664 //return (strncmp_result <= 0);
6665 return (strncmp_result == 0);
6672 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: restrict inlining to callers whose full name starts with
 * the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable. Mirrors check_inline_called_method_name_limit () above.
 */
6674 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6677 static const char *limit = NULL;
6679 if (limit == NULL) {
6680 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6681 if (limit_string != NULL) {
6682 limit = limit_string;
6688 if (limit [0] != '\0') {
6689 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the cached limit string. */
6691 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6692 g_free (caller_method_name);
6694 //return (strncmp_result <= 0);
6695 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes vreg DREG to the zero/default value of RTYPE:
 * NULL for reference/pointer types, 0 for integer types, 0.0 for floats,
 * and VZERO for value types (including generic type vars known to be
 * valuetypes).
 */
6703 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage backing the float/double zero constants emitted below. */
6705 static double r8_0 = 0.0;
6706 static float r4_0 = 0.0;
6710 rtype = mini_get_underlying_type (cfg, rtype);
6714 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6715 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6716 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6717 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6718 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With cfg->r4fp, R4 values stay single precision, so use an R4 const. */
6719 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6720 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6721 ins->type = STACK_R4;
6722 ins->inst_p0 = (void*)&r4_0;
6724 MONO_ADD_INS (cfg->cbb, ins);
6725 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6726 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6727 ins->type = STACK_R8;
6728 ins->inst_p0 = (void*)&r8_0;
6730 MONO_ADD_INS (cfg->cbb, ins);
6731 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6732 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6733 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type variables known to be valuetypes are zeroed like structs. */
6734 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6735 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6737 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder initializations
 * which keep the IR valid without generating real stores. Falls back to
 * emit_init_rvar () for types which have no dummy opcode.
 */
6742 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6746 rtype = mini_get_underlying_type (cfg, rtype);
6750 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6751 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6752 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6753 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6754 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6755 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6756 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6757 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6758 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6759 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6760 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6761 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6762 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6763 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization. */
6765 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. If INIT is FALSE, emit
 * dummy initialization statements instead, just to keep the IR valid.
 */
6769 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6771 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6773 MonoInst *var = cfg->locals [local];
/* Under soft-float, initialize a fresh vreg first and then store it into
 * the local. */
6774 if (COMPILE_SOFT_FLOAT (cfg)) {
6776 int reg = alloc_dreg (cfg, var->type);
6777 emit_init_rvar (cfg, reg, type);
6778 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6781 emit_init_rvar (cfg, var->dreg, type);
6783 emit_dummy_init_rvar (cfg, var->dreg, type);
6790 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline the body of CMETHOD into the current method at IP by running
 * mono_method_to_ir () on it between freshly created start/end bblocks.
 * The parts of CFG which describe the method being compiled (locals, args,
 * bblock maps, generic context, ...) are saved up front and restored
 * afterwards, so compilation of the caller can resume. On success the
 * (non-negative) inline cost is returned and *OUT_CBB is set to the current
 * bblock; on failure the newly added bblocks are discarded.
 */
6793 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6794 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6796 MonoInst *ins, *rvar = NULL;
6797 MonoMethodHeader *cheader;
6798 MonoBasicBlock *ebblock, *sbblock;
/* Saved caller compilation state, restored after the inline. */
6800 MonoMethod *prev_inlined_method;
6801 MonoInst **prev_locals, **prev_args;
6802 MonoType **prev_arg_types;
6803 guint prev_real_offset;
6804 GHashTable *prev_cbb_hash;
6805 MonoBasicBlock **prev_cil_offset_to_bb;
6806 MonoBasicBlock *prev_cbb;
6807 unsigned char* prev_cil_start;
6808 guint32 prev_cil_offset_to_bb_len;
6809 MonoMethod *prev_current_method;
6810 MonoGenericContext *prev_generic_context;
6811 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6813 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6815 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6816 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6819 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6820 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
/* Refresh the signature from the method itself. */
6825 fsig = mono_method_signature (cmethod);
6827 if (cfg->verbose_level > 2)
6828 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6830 if (!cmethod->inline_info) {
6831 cfg->stat_inlineable_methods++;
6832 cmethod->inline_info = 1;
6835 /* allocate local variables */
6836 cheader = mono_method_get_header (cmethod);
6838 if (cheader == NULL || mono_loader_get_last_error ()) {
6839 MonoLoaderError *error = mono_loader_get_last_error ();
6842 mono_metadata_free_mh (cheader);
6843 if (inline_always && error)
6844 mono_cfg_set_exception (cfg, error->exception_type);
6846 mono_loader_clear_error ();
6850 /*Must verify before creating locals as it can cause the JIT to assert.*/
6851 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6852 mono_metadata_free_mh (cheader);
6856 /* allocate space to store the return value */
6857 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6858 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Switch cfg->locals over to the callee's locals. */
6861 prev_locals = cfg->locals;
6862 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6863 for (i = 0; i < cheader->num_locals; ++i)
6864 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6866 /* allocate start and end blocks */
6867 /* This is needed so if the inline is aborted, we can clean up */
6868 NEW_BBLOCK (cfg, sbblock);
6869 sbblock->real_offset = real_offset;
6871 NEW_BBLOCK (cfg, ebblock);
6872 ebblock->block_num = cfg->num_bblocks++;
6873 ebblock->real_offset = real_offset;
/* Save the rest of the caller's per-method state. */
6875 prev_args = cfg->args;
6876 prev_arg_types = cfg->arg_types;
6877 prev_inlined_method = cfg->inlined_method;
6878 cfg->inlined_method = cmethod;
6879 cfg->ret_var_set = FALSE;
6880 cfg->inline_depth ++;
6881 prev_real_offset = cfg->real_offset;
6882 prev_cbb_hash = cfg->cbb_hash;
6883 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6884 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6885 prev_cil_start = cfg->cil_start;
6886 prev_cbb = cfg->cbb;
6887 prev_current_method = cfg->current_method;
6888 prev_generic_context = cfg->generic_context;
6889 prev_ret_var_set = cfg->ret_var_set;
6890 prev_disable_inline = cfg->disable_inline;
/* NOTE(review): 'virtual' flags a callvirt on an instance method;
 * mono_method_to_ir () presumably emits the implied 'this' check — confirm. */
6892 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6895 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6897 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
6899 cfg->inlined_method = prev_inlined_method;
6900 cfg->real_offset = prev_real_offset;
6901 cfg->cbb_hash = prev_cbb_hash;
6902 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6903 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6904 cfg->cil_start = prev_cil_start;
6905 cfg->locals = prev_locals;
6906 cfg->args = prev_args;
6907 cfg->arg_types = prev_arg_types;
6908 cfg->current_method = prev_current_method;
6909 cfg->generic_context = prev_generic_context;
6910 cfg->ret_var_set = prev_ret_var_set;
6911 cfg->disable_inline = prev_disable_inline;
6912 cfg->inline_depth --;
/* A cost in [0, 60) means the body was small enough; inline_always overrides. */
6914 if ((costs >= 0 && costs < 60) || inline_always) {
6915 if (cfg->verbose_level > 2)
6916 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6918 cfg->stat_inlined_methods++;
6920 /* always add some code to avoid block split failures */
6921 MONO_INST_NEW (cfg, ins, OP_NOP);
6922 MONO_ADD_INS (prev_cbb, ins);
6924 prev_cbb->next_bb = sbblock;
6925 link_bblock (cfg, prev_cbb, sbblock);
6928 * Get rid of the begin and end bblocks if possible to aid local
6931 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6933 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6934 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6936 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6937 MonoBasicBlock *prev = ebblock->in_bb [0];
6938 mono_merge_basic_blocks (cfg, prev, ebblock);
6940 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6941 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6942 cfg->cbb = prev_cbb;
6946 * It's possible that the rvar is set in some prev bblock, but not in others.
6952 for (i = 0; i < ebblock->in_count; ++i) {
6953 bb = ebblock->in_bb [i];
6955 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6958 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6967 *out_cbb = cfg->cbb;
6971 * If the inlined method contains only a throw, then the ret var is not
6972 * set, so set it to a dummy value.
6975 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6977 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6980 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inlining failed: reset exception state and drop the new bblocks. */
6983 if (cfg->verbose_level > 2)
6984 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6985 cfg->exception_type = MONO_EXCEPTION_NONE;
6986 mono_loader_clear_error ();
6988 /* This gets rid of the newly added bblocks */
6989 cfg->cbb = prev_cbb;
6991 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6996 * Some of these comments may well be out-of-date.
6997 * Design decisions: we do a single pass over the IL code (and we do bblock
6998 * splitting/merging in the few cases when it's required: a back jump to an IL
6999 * address that was not already seen as bblock starting point).
7000 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7001 * Complex operations are decomposed into simpler ones right away. We need to let the
7002 * arch-specific code peek and poke inside this process somehow (except when the
7003 * optimizations can take advantage of the full semantic info of coarse opcodes).
7004 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7005 * MonoInst->opcode initially is the IL opcode or some simplification of that
7006 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7007 * opcode with value bigger than OP_LAST.
7008 * At this point the IR can be handed over to an interpreter, a dumb code generator
7009 * or to the optimizing code generator that will translate it to SSA form.
7011 * Profiling directed optimizations.
7012 * We may compile by default with few or no optimizations and instrument the code
7013 * or the user may indicate what methods to optimize the most either in a config file
7014 * or through repeated runs where the compiler applies offline the optimizations to
7015 * each method and then decides if it was worth it.
7018 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7019 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7020 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7021 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7022 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7023 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7024 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7025 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7027 /* offset from br.s -> br like opcodes */
7028 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP falls inside basic block BB, i.e. no
 * other bblock has been registered as starting at that IL offset.
 */
7031 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7033 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7035 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream from START to END, creating basic blocks at every
 * branch target and at the instruction following each branch/switch. Also
 * marks the bblock containing a CEE_THROW as out-of-line so it can be
 * placed in the cold section.
 */
7039 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7041 unsigned char *ip = start;
7042 unsigned char *target;
7045 MonoBasicBlock *bblock;
7046 const MonoOpcode *opcode;
7049 cli_addr = ip - start;
7050 i = mono_opcode_value ((const guint8 **)&ip, end);
7053 opcode = &mono_opcodes [i];
/* Dispatch on the operand format to find the instruction length and any
 * branch targets. */
7054 switch (opcode->argument) {
7055 case MonoInlineNone:
7058 case MonoInlineString:
7059 case MonoInlineType:
7060 case MonoInlineField:
7061 case MonoInlineMethod:
7064 case MonoShortInlineR:
7071 case MonoShortInlineVar:
7072 case MonoShortInlineI:
/* Short branch: 1-byte opcode + 1-byte signed displacement. */
7075 case MonoShortInlineBrTarget:
7076 target = start + cli_addr + 2 + (signed char)ip [1];
7077 GET_BBLOCK (cfg, bblock, target);
7080 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 1-byte opcode + 4-byte signed displacement. */
7082 case MonoInlineBrTarget:
7083 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7084 GET_BBLOCK (cfg, bblock, target);
7087 GET_BBLOCK (cfg, bblock, ip);
/* Switch: count followed by N 4-byte displacements, all relative to the
 * end of the instruction. */
7089 case MonoInlineSwitch: {
7090 guint32 n = read32 (ip + 1);
7093 cli_addr += 5 + 4 * n;
7094 target = start + cli_addr;
7095 GET_BBLOCK (cfg, bblock, target);
7097 for (j = 0; j < n; ++j) {
7098 target = start + cli_addr + (gint32)read32 (ip);
7099 GET_BBLOCK (cfg, bblock, target);
7109 g_assert_not_reached ();
7112 if (i == CEE_THROW) {
7113 unsigned char *bb_start = ip - 1;
7115 /* Find the start of the bblock containing the throw */
7117 while ((bb_start >= start) && !bblock) {
7118 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of line. */
7122 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing open
 * constructed types. For wrapper methods the token indexes the wrapper's
 * own data and is inflated with CONTEXT when present.
 */
7132 static inline MonoMethod *
7133 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7137 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7138 method = mono_method_get_wrapper_data (m, token);
7141 method = mono_class_inflate_generic_method_checked (method, context, &error);
7142 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7145 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally checks for open
 * constructed types when generic sharing is not in effect.
 */
7151 static inline MonoMethod *
7152 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7154 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7156 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoClass, inflating with
 * CONTEXT. For wrappers the token indexes the wrapper's data. The class is
 * initialized before being returned.
 */
7162 static inline MonoClass*
7163 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7168 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7169 klass = mono_method_get_wrapper_data (method, token);
7171 klass = mono_class_inflate_generic_class (klass, context);
7173 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7174 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7177 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoMethodSignature. For
 * wrappers the signature comes from the wrapper data and is inflated with
 * CONTEXT; otherwise it is parsed from the method's image.
 */
7181 static inline MonoMethodSignature*
7182 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7184 MonoMethodSignature *fsig;
7186 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7189 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7191 fsig = mono_inflate_generic_signature (fsig, context, &error);
7193 g_assert (mono_error_ok (&error));
7196 fsig = mono_metadata_parse_signature (method->klass->image, token);
7202 * Returns TRUE if the JIT should abort inlining because "callee"
7203 * is influenced by security attributes.
7206 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluate link demands when inlining (caller differs from the method
 * being compiled) and the callee carries declarative security. */
7210 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7214 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7215 if (result == MONO_JIT_SECURITY_OK)
7218 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7219 /* Generate code to throw a SecurityException before the actual call/link */
7220 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7223 NEW_ICONST (cfg, args [0], 4);
7224 NEW_METHODCONST (cfg, args [1], caller);
7225 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7226 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7227 /* don't hide previous results */
7228 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7229 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the (cached) SecurityManager.ThrowException helper method used
 * to raise security exceptions from generated code.
 */
7237 throw_exception (void)
7239 static MonoMethod *method = NULL;
7242 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7243 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call which throws the pre-allocated exception object EX via the
 * SecurityManager.ThrowException helper.
 */
7250 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7252 MonoMethod *thrower = throw_exception ();
7255 EMIT_NEW_PCONST (cfg, args [0], ex);
7256 mono_emit_method_call (cfg, thrower, args, NULL);
7260 * Return the original method if a wrapper is specified. We can only access
7261 * the custom attributes from the original method.
7264 get_original_method (MonoMethod *method)
7266 if (method->wrapper_type == MONO_WRAPPER_NONE)
7269 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7270 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7273 /* in other cases we need to find the original method */
7274 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code which
 * throws the corresponding security exception at the access site.
 */
7278 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7279 MonoBasicBlock *bblock, unsigned char *ip)
7281 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7282 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7284 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code which
 * throws the corresponding security exception at the call site.
 */
7288 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7289 MonoBasicBlock *bblock, unsigned char *ip)
7291 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7292 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7294 emit_throw_exception (cfg, ex);
7298 * Check that the IL instructions at ip are the array initialization
7299 * sequence and return the pointer to the data and the size.
7302 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Recognize the pattern emitted by compilers for array initializers:
 *
 * newarr[System.Int32]
 * dup
 * ldtoken field valuetype ...
 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
 */
7305 * newarr[System.Int32]
7307 * ldtoken field valuetype ...
7308 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7310 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7312 guint32 token = read32 (ip + 7);
7313 guint32 field_token = read32 (ip + 2);
7314 guint32 field_index = field_token & 0xffffff;
7316 const char *data_ptr;
7318 MonoMethod *cmethod;
7319 MonoClass *dummy_class;
7320 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7324 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7328 *out_field_token = field_token;
/* The call target must really be RuntimeHelpers.InitializeArray in corlib. */
7330 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
7333 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7335 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7336 case MONO_TYPE_BOOLEAN:
7340 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7341 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7342 case MONO_TYPE_CHAR:
/* The field's static data must be at least as large as the array. */
7359 if (size > mono_type_size (field->type, &dummy_align))
7362 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7363 if (!image_is_dynamic (method->klass->image)) {
7364 field_index = read32 (ip + 2) & 0xffffff;
7365 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7366 data_ptr = mono_image_rva_map (method->klass->image, rva);
7367 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7368 /* for aot code we do the lookup on load */
7369 if (aot && data_ptr)
7370 return GUINT_TO_POINTER (rva);
7372 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7374 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending instruction at IP.
 */
7382 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7384 char *method_fname = mono_method_full_name (method, TRUE);
7386 MonoMethodHeader *header = mono_method_get_header (method);
7388 if (header->code_size == 0)
7389 method_code = g_strdup ("method body is empty.");
7391 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7393 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7394 g_free (method_fname);
7395 g_free (method_code);
/* The header is freed later along with the other headers of the cfg. */
7396 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-allocated EXCEPTION object on CFG, registering it as a GC
 * root so it survives until the compilation is aborted.
 */
7400 set_exception_object (MonoCompile *cfg, MonoException *exception)
7402 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7403 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7404 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N. When the value
 * is a freshly emitted integer constant, retarget its dreg to the local
 * instead of emitting a separate move.
 */
7408 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7411 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7412 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7413 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7414 /* Optimize reg-reg moves away */
7416 * Can't optimize other opcodes, since sp[0] might point to
7417 * the last ins of a decomposed opcode.
7419 sp [0]->dreg = (cfg)->locals [n]->dreg;
7421 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7426 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize 'ldloca <n>; initobj <type>' and replace it with a direct
 * initialization of the local, avoiding the address-taken local.
 */
7429 static inline unsigned char *
7430 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7440 local = read16 (ip + 2);
/* The following initobj must be in the same bblock for this to be safe. */
7444 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7445 /* From the INITOBJ case */
7446 token = read32 (ip + 2);
7447 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7448 CHECK_TYPELOAD (klass);
7449 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7450 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by walking
 * the parent chain.
 */
7458 is_exception_class (MonoClass *class)
7461 if (class == mono_defaults.exception_class)
7463 class = class->parent;
7469 * is_jit_optimizer_disabled:
7471 * Determine whether M's assembly has a DebuggableAttribute with the
7472 * IsJITOptimizerDisabled flag set.
7475 is_jit_optimizer_disabled (MonoMethod *m)
7477 MonoAssembly *ass = m->klass->image->assembly;
7478 MonoCustomAttrInfo* attrs;
7479 static MonoClass *klass;
7481 gboolean val = FALSE;
/* The result is cached per-assembly; the memory barriers below order the
 * value store before the 'inited' flag store. */
7484 if (ass->jit_optimizer_disabled_inited)
7485 return ass->jit_optimizer_disabled;
7488 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7491 ass->jit_optimizer_disabled = FALSE;
7492 mono_memory_barrier ();
7493 ass->jit_optimizer_disabled_inited = TRUE;
7497 attrs = mono_custom_attrs_from_assembly (ass);
7499 for (i = 0; i < attrs->num_attrs; ++i) {
7500 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7502 MonoMethodSignature *sig;
7504 if (!attr->ctor || attr->ctor->klass != klass)
7506 /* Decode the attribute. See reflection.c */
7507 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
7508 g_assert (read16 (p) == 0x0001);
7511 // FIXME: Support named parameters
7512 sig = mono_method_signature (attr->ctor);
7513 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7515 /* Two boolean arguments */
7519 mono_custom_attrs_free (attrs);
7522 ass->jit_optimizer_disabled = val;
7523 mono_memory_barrier ();
7524 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a 'tail.' call from METHOD to CMETHOD with signature
 * FSIG can actually be compiled as a tail call. Starts from an arch or
 * signature-equality check and then vetoes cases where arguments or 'this'
 * could point into the current frame, pinvokes, lmf-saving methods,
 * wrappers, and non-CEE_CALL opcodes.
 */
7530 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7532 gboolean supported_tail_call;
7535 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7536 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7538 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7541 for (i = 0; i < fsig->param_count; ++i) {
7542 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7543 /* These can point to the current method's stack */
7544 supported_tail_call = FALSE;
7546 if (fsig->hasthis && cmethod->klass->valuetype)
7547 /* this might point to the current method's stack */
7548 supported_tail_call = FALSE;
7549 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7550 supported_tail_call = FALSE;
7551 if (cfg->method->save_lmf)
7552 supported_tail_call = FALSE;
7553 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7554 supported_tail_call = FALSE;
7555 if (call_opcode != CEE_CALL)
7556 supported_tail_call = FALSE;
7558 /* Debugging support */
7560 if (supported_tail_call) {
7561 if (!mono_debug_count ())
7562 supported_tail_call = FALSE;
7566 return supported_tail_call;
7569 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7570 * it to the thread local value based on the tls_offset field. Every other kind of access to
7571 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is the corlib ThreadLocal`1.tlsdata field which
 * gets the special treatment described above.
 */
7574 is_magic_tls_access (MonoClassField *field)
7576 if (strcmp (field->name, "tlsdata"))
7578 if (strcmp (field->parent->name, "ThreadLocal`1"))
7580 return field->parent->image == mono_defaults.corlib;
7583 /* emits the code needed to access a managed tls var (like ThreadStatic)
7584 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7585 * pointer for the current thread.
7586 * Returns the MonoInst* representing the address of the tls var.
7589 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7592 int static_data_reg, array_reg, dreg;
7593 int offset2_reg, idx_reg;
7594 // inlined access to the tls data
7595 // idx = (offset >> 24) - 1;
7596 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7597 static_data_reg = alloc_ireg (cfg);
7598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1 */
7599 idx_reg = alloc_ireg (cfg);
7600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by the pointer size to address static_data [idx]. */
7602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7603 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7604 array_reg = alloc_ireg (cfg);
7605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff, the byte offset within the chunk. */
7606 offset2_reg = alloc_ireg (cfg);
7607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7608 dreg = alloc_ireg (cfg);
7609 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7614 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7615 * this address is cached per-method in cached_tls_addr.
7618 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7620 MonoInst *load, *addr, *temp, *store, *thread_ins;
7621 MonoClassField *offset_field;
/* Fast path: reuse the address cached earlier in this method. */
7623 if (*cached_tls_addr) {
7624 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7627 thread_ins = mono_get_thread_intrinsic (cfg);
/* Load the tls_offset field from the ThreadLocal<T> instance. */
7628 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7630 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7632 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: fall back to a managed call. */
7634 MonoMethod *thread_method;
7635 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7636 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7638 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7639 addr->klass = mono_class_from_mono_type (tls_field->type);
7640 addr->type = STACK_MP;
/* Cache the computed address in a temp for later accesses. */
7641 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7642 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7644 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7651 * Handle calls made to ctors from NEWOBJ opcodes.
7653 * REF_BBLOCK will point to the current bblock after the call.
/*
 * handle_ctor_call:
 *
 *   Emit the call to constructor CMETHOD (signature FSIG) for a NEWOBJ
 * opcode, choosing between an intrinsic, inlining, a gsharedvt indirect
 * call, an rgctx-based indirect call, or a plain direct call.  SP points at
 * the evaluated arguments (sp [0] is 'this'); *REF_BBLOCK is updated when
 * inlining changes the current bblock; *INLINE_COSTS is adjusted on a
 * successful inline.
 *
 * NOTE(review): this view of the file is elided — some lines (braces, else
 * branches) are missing; comments describe only the visible statements.
 */
7656 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7657 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7659 	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7660 	MonoBasicBlock *bblock = *ref_bblock;
/* Shared-generic valuetype ctor: the callee needs its (m)rgctx passed
 * explicitly, so compute vtable_arg from the runtime generic context. */
7662 	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7663 		mono_method_is_generic_sharable (cmethod, TRUE)) {
7664 		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* mono_class_vtable () is called for its side effect (vtable init check);
 * CHECK_TYPELOAD bails out on a type-load failure. */
7665 			mono_class_vtable (cfg->domain, cmethod->klass);
7666 			CHECK_TYPELOAD (cmethod->klass);
/* Method-instantiated generic: pass the method RGCTX. */
7668 			vtable_arg = emit_get_rgctx_method (cfg, context_used,
7669 				cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Class-instantiated generic (elided branch above): pass the vtable fetched
 * via the rgctx. */
7672 			vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7673 				cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-context-used path (elided 'else' presumed): the exact vtable is known
 * at compile time, so embed it as a constant — TODO confirm against full
 * source. */
7675 			MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7677 			CHECK_TYPELOAD (cmethod->klass);
7678 			EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7683 	/* Avoid virtual calls to ctors if possible */
/* MarshalByRef objects may need a remoting-aware (virtual-style) ctor call,
 * so keep 'this' around as the callvirt receiver. */
7684 	if (mono_class_is_marshalbyref (cmethod->klass))
7685 		callvirt_this_arg = sp [0];
/* 1) Intrinsic: if the JIT knows a direct IR expansion for this ctor, use
 * it; ctors return void, hence the assert. */
7687 	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7688 		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7689 		CHECK_CFG_EXCEPTION;
/* 2) Inline: only when no rgctx/vtable arg is needed and the class is not
 * an Exception subclass (those ctors are kept out of line). */
7690 	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7691 		mono_method_check_inlining (cfg, cmethod) &&
7692 		!mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7695 		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* Successful inline: account the 5-byte call opcode in real_offset and in
 * the running inline-cost budget, and publish the new current bblock. */
7696 			cfg->real_offset += 5;
7698 			*inline_costs += costs - 5;
7699 			*ref_bblock = bblock;
/* Inline failed (elided 'else' presumed): record the failure and fall back
 * to an out-of-line call — gsharedvt signatures are not supported here. */
7701 			INLINE_FAILURE ("inline failure");
7702 			// FIXME-VT: Clean this up
7703 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7704 				GSHAREDVT_FAILURE(*ip);
7705 			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signature: call through the gsharedvt out trampoline fetched
 * from the rgctx. */
7707 	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7710 		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7711 		mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) Context-used target that cannot be called directly: fetch the code
 * address from the rgctx and do an indirect call. */
7712 	} else if (context_used &&
7713 			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7714 				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7715 		MonoInst *cmethod_addr;
7717 		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7719 		cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7720 			cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7722 		mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) Default (elided 'else' presumed): plain out-of-line ctor call; counts
 * as an inline failure for the enclosing inliner. */
7724 		INLINE_FAILURE ("ctor call");
7725 		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7726 										  callvirt_this_arg, NULL, vtable_arg);
7733 * mono_method_to_ir:
7735 * Translate the .net IL into linear IR.
7738 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7739 MonoInst *return_var, MonoInst **inline_args,
7740 guint inline_offset, gboolean is_virtual_call)
7743 MonoInst *ins, **sp, **stack_start;
7744 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7745 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7746 MonoMethod *cmethod, *method_definition;
7747 MonoInst **arg_array;
7748 MonoMethodHeader *header;
7750 guint32 token, ins_flag;
7752 MonoClass *constrained_class = NULL;
7753 unsigned char *ip, *end, *target, *err_pos;
7754 MonoMethodSignature *sig;
7755 MonoGenericContext *generic_context = NULL;
7756 MonoGenericContainer *generic_container = NULL;
7757 MonoType **param_types;
7758 int i, n, start_new_bblock, dreg;
7759 int num_calls = 0, inline_costs = 0;
7760 int breakpoint_id = 0;
7762 MonoBoolean security, pinvoke;
7763 MonoSecurityManager* secman = NULL;
7764 MonoDeclSecurityActions actions;
7765 GSList *class_inits = NULL;
7766 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7768 gboolean init_locals, seq_points, skip_dead_blocks;
7769 gboolean sym_seq_points = FALSE;
7770 MonoInst *cached_tls_addr = NULL;
7771 MonoDebugMethodInfo *minfo;
7772 MonoBitSet *seq_point_locs = NULL;
7773 MonoBitSet *seq_point_set_locs = NULL;
7775 cfg->disable_inline = is_jit_optimizer_disabled (method);
7777 /* serialization and xdomain stuff may need access to private fields and methods */
7778 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7779 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7780 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7781 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7782 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7783 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7785 dont_verify |= mono_security_smcs_hack_enabled ();
7787 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7788 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7789 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7790 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7791 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7793 image = method->klass->image;
7794 header = mono_method_get_header (method);
7796 MonoLoaderError *error;
7798 if ((error = mono_loader_get_last_error ())) {
7799 mono_cfg_set_exception (cfg, error->exception_type);
7801 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7802 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7804 goto exception_exit;
7806 generic_container = mono_method_get_generic_container (method);
7807 sig = mono_method_signature (method);
7808 num_args = sig->hasthis + sig->param_count;
7809 ip = (unsigned char*)header->code;
7810 cfg->cil_start = ip;
7811 end = ip + header->code_size;
7812 cfg->stat_cil_code_size += header->code_size;
7814 seq_points = cfg->gen_seq_points && cfg->method == method;
7816 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7817 /* We could hit a seq point before attaching to the JIT (#8338) */
7821 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7822 minfo = mono_debug_lookup_method (method);
7824 int i, n_il_offsets;
7828 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7829 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7830 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7831 sym_seq_points = TRUE;
7832 for (i = 0; i < n_il_offsets; ++i) {
7833 if (il_offsets [i] < header->code_size)
7834 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7836 g_free (il_offsets);
7837 g_free (line_numbers);
7838 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7839 /* Methods without line number info like auto-generated property accessors */
7840 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7841 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7842 sym_seq_points = TRUE;
7847 * Methods without init_locals set could cause asserts in various passes
7848 * (#497220). To work around this, we emit dummy initialization opcodes
7849 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7850 * on some platforms.
7852 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7853 init_locals = header->init_locals;
7857 method_definition = method;
7858 while (method_definition->is_inflated) {
7859 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7860 method_definition = imethod->declaring;
7863 /* SkipVerification is not allowed if core-clr is enabled */
7864 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7866 dont_verify_stloc = TRUE;
7869 if (sig->is_inflated)
7870 generic_context = mono_method_get_context (method);
7871 else if (generic_container)
7872 generic_context = &generic_container->context;
7873 cfg->generic_context = generic_context;
7875 if (!cfg->generic_sharing_context)
7876 g_assert (!sig->has_type_parameters);
7878 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7879 g_assert (method->is_inflated);
7880 g_assert (mono_method_get_context (method)->method_inst);
7882 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7883 g_assert (sig->generic_param_count);
7885 if (cfg->method == method) {
7886 cfg->real_offset = 0;
7888 cfg->real_offset = inline_offset;
7891 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7892 cfg->cil_offset_to_bb_len = header->code_size;
7894 cfg->current_method = method;
7896 if (cfg->verbose_level > 2)
7897 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7899 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7901 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7902 for (n = 0; n < sig->param_count; ++n)
7903 param_types [n + sig->hasthis] = sig->params [n];
7904 cfg->arg_types = param_types;
7906 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7907 if (cfg->method == method) {
7909 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7910 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7913 NEW_BBLOCK (cfg, start_bblock);
7914 cfg->bb_entry = start_bblock;
7915 start_bblock->cil_code = NULL;
7916 start_bblock->cil_length = 0;
7917 #if defined(__native_client_codegen__)
7918 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7919 ins->dreg = alloc_dreg (cfg, STACK_I4);
7920 MONO_ADD_INS (start_bblock, ins);
7924 NEW_BBLOCK (cfg, end_bblock);
7925 cfg->bb_exit = end_bblock;
7926 end_bblock->cil_code = NULL;
7927 end_bblock->cil_length = 0;
7928 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7929 g_assert (cfg->num_bblocks == 2);
7931 arg_array = cfg->args;
7933 if (header->num_clauses) {
7934 cfg->spvars = g_hash_table_new (NULL, NULL);
7935 cfg->exvars = g_hash_table_new (NULL, NULL);
7937 /* handle exception clauses */
7938 for (i = 0; i < header->num_clauses; ++i) {
7939 MonoBasicBlock *try_bb;
7940 MonoExceptionClause *clause = &header->clauses [i];
7941 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7942 try_bb->real_offset = clause->try_offset;
7943 try_bb->try_start = TRUE;
7944 try_bb->region = ((i + 1) << 8) | clause->flags;
7945 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7946 tblock->real_offset = clause->handler_offset;
7947 tblock->flags |= BB_EXCEPTION_HANDLER;
7950 * Linking the try block with the EH block hinders inlining as we won't be able to
7951 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7953 if (COMPILE_LLVM (cfg))
7954 link_bblock (cfg, try_bb, tblock);
7956 if (*(ip + clause->handler_offset) == CEE_POP)
7957 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7959 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7960 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7961 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7962 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7963 MONO_ADD_INS (tblock, ins);
7965 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7966 /* finally clauses already have a seq point */
7967 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7968 MONO_ADD_INS (tblock, ins);
7971 /* todo: is a fault block unsafe to optimize? */
7972 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7973 tblock->flags |= BB_EXCEPTION_UNSAFE;
7977 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7979 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7981 /* catch and filter blocks get the exception object on the stack */
7982 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7983 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7984 MonoInst *dummy_use;
7986 /* mostly like handle_stack_args (), but just sets the input args */
7987 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7988 tblock->in_scount = 1;
7989 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7990 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7993 * Add a dummy use for the exvar so its liveness info will be
7997 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7999 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8000 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8001 tblock->flags |= BB_EXCEPTION_HANDLER;
8002 tblock->real_offset = clause->data.filter_offset;
8003 tblock->in_scount = 1;
8004 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8005 /* The filter block shares the exvar with the handler block */
8006 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8007 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8008 MONO_ADD_INS (tblock, ins);
8012 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8013 clause->data.catch_class &&
8014 cfg->generic_sharing_context &&
8015 mono_class_check_context_used (clause->data.catch_class)) {
8017 * In shared generic code with catch
8018 * clauses containing type variables
8019 * the exception handling code has to
8020 * be able to get to the rgctx.
8021 * Therefore we have to make sure that
8022 * the vtable/mrgctx argument (for
8023 * static or generic methods) or the
8024 * "this" argument (for non-static
8025 * methods) are live.
8027 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8028 mini_method_get_context (method)->method_inst ||
8029 method->klass->valuetype) {
8030 mono_get_vtable_var (cfg);
8032 MonoInst *dummy_use;
8034 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8039 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8040 cfg->cbb = start_bblock;
8041 cfg->args = arg_array;
8042 mono_save_args (cfg, sig, inline_args);
8045 /* FIRST CODE BLOCK */
8046 NEW_BBLOCK (cfg, bblock);
8047 bblock->cil_code = ip;
8051 ADD_BBLOCK (cfg, bblock);
8053 if (cfg->method == method) {
8054 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8055 if (breakpoint_id) {
8056 MONO_INST_NEW (cfg, ins, OP_BREAK);
8057 MONO_ADD_INS (bblock, ins);
8061 if (mono_security_cas_enabled ())
8062 secman = mono_security_manager_get_methods ();
8064 security = (secman && mono_security_method_has_declsec (method));
8065 /* at this point having security doesn't mean we have any code to generate */
8066 if (security && (cfg->method == method)) {
8067 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8068 * And we do not want to enter the next section (with allocation) if we
8069 * have nothing to generate */
8070 security = mono_declsec_get_demands (method, &actions);
8073 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8074 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8076 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8077 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8078 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8080 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
8081 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8085 mono_custom_attrs_free (custom);
8088 custom = mono_custom_attrs_from_class (wrapped->klass);
8089 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8093 mono_custom_attrs_free (custom);
8096 /* not a P/Invoke after all */
8101 /* we use a separate basic block for the initialization code */
8102 NEW_BBLOCK (cfg, init_localsbb);
8103 cfg->bb_init = init_localsbb;
8104 init_localsbb->real_offset = cfg->real_offset;
8105 start_bblock->next_bb = init_localsbb;
8106 init_localsbb->next_bb = bblock;
8107 link_bblock (cfg, start_bblock, init_localsbb);
8108 link_bblock (cfg, init_localsbb, bblock);
8110 cfg->cbb = init_localsbb;
8112 if (cfg->gsharedvt && cfg->method == method) {
8113 MonoGSharedVtMethodInfo *info;
8114 MonoInst *var, *locals_var;
8117 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8118 info->method = cfg->method;
8119 info->count_entries = 16;
8120 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8121 cfg->gsharedvt_info = info;
8123 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8124 /* prevent it from being register allocated */
8125 //var->flags |= MONO_INST_VOLATILE;
8126 cfg->gsharedvt_info_var = var;
8128 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8129 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8131 /* Allocate locals */
8132 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8133 /* prevent it from being register allocated */
8134 //locals_var->flags |= MONO_INST_VOLATILE;
8135 cfg->gsharedvt_locals_var = locals_var;
8137 dreg = alloc_ireg (cfg);
8138 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8140 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8141 ins->dreg = locals_var->dreg;
8143 MONO_ADD_INS (cfg->cbb, ins);
8144 cfg->gsharedvt_locals_var_ins = ins;
8146 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8149 ins->flags |= MONO_INST_INIT;
8153 /* at this point we know, if security is TRUE, that some code needs to be generated */
8154 if (security && (cfg->method == method)) {
8157 cfg->stat_cas_demand_generation++;
8159 if (actions.demand.blob) {
8160 /* Add code for SecurityAction.Demand */
8161 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8162 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8163 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8164 mono_emit_method_call (cfg, secman->demand, args, NULL);
8166 if (actions.noncasdemand.blob) {
8167 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8168 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8169 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8170 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8171 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8172 mono_emit_method_call (cfg, secman->demand, args, NULL);
8174 if (actions.demandchoice.blob) {
8175 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8176 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8177 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8178 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8179 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8183 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8185 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8188 if (mono_security_core_clr_enabled ()) {
8189 /* check if this is native code, e.g. an icall or a p/invoke */
8190 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8191 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8193 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8194 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8196 /* if this is a native call then it can only be JITted from platform code */
8197 if ((icall || pinvk) && method->klass && method->klass->image) {
8198 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8199 MonoException *ex = icall ? mono_get_exception_security () :
8200 mono_get_exception_method_access ();
8201 emit_throw_exception (cfg, ex);
8208 CHECK_CFG_EXCEPTION;
8210 if (header->code_size == 0)
8213 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8218 if (cfg->method == method)
8219 mono_debug_init_method (cfg, bblock, breakpoint_id);
8221 for (n = 0; n < header->num_locals; ++n) {
8222 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8227 /* We force the vtable variable here for all shared methods
8228 for the possibility that they might show up in a stack
8229 trace where their exact instantiation is needed. */
8230 if (cfg->generic_sharing_context && method == cfg->method) {
8231 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8232 mini_method_get_context (method)->method_inst ||
8233 method->klass->valuetype) {
8234 mono_get_vtable_var (cfg);
8236 /* FIXME: Is there a better way to do this?
8237 We need the variable live for the duration
8238 of the whole method. */
8239 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8243 /* add a check for this != NULL to inlined methods */
8244 if (is_virtual_call) {
8247 NEW_ARGLOAD (cfg, arg_ins, 0);
8248 MONO_ADD_INS (cfg->cbb, arg_ins);
8249 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8252 skip_dead_blocks = !dont_verify;
8253 if (skip_dead_blocks) {
8254 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8259 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8260 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8263 start_new_bblock = 0;
8266 if (cfg->method == method)
8267 cfg->real_offset = ip - header->code;
8269 cfg->real_offset = inline_offset;
8274 if (start_new_bblock) {
8275 bblock->cil_length = ip - bblock->cil_code;
8276 if (start_new_bblock == 2) {
8277 g_assert (ip == tblock->cil_code);
8279 GET_BBLOCK (cfg, tblock, ip);
8281 bblock->next_bb = tblock;
8284 start_new_bblock = 0;
8285 for (i = 0; i < bblock->in_scount; ++i) {
8286 if (cfg->verbose_level > 3)
8287 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8288 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8292 g_slist_free (class_inits);
8295 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8296 link_bblock (cfg, bblock, tblock);
8297 if (sp != stack_start) {
8298 handle_stack_args (cfg, stack_start, sp - stack_start);
8300 CHECK_UNVERIFIABLE (cfg);
8302 bblock->next_bb = tblock;
8305 for (i = 0; i < bblock->in_scount; ++i) {
8306 if (cfg->verbose_level > 3)
8307 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8308 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8311 g_slist_free (class_inits);
8316 if (skip_dead_blocks) {
8317 int ip_offset = ip - header->code;
8319 if (ip_offset == bb->end)
8323 int op_size = mono_opcode_size (ip, end);
8324 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8326 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8328 if (ip_offset + op_size == bb->end) {
8329 MONO_INST_NEW (cfg, ins, OP_NOP);
8330 MONO_ADD_INS (bblock, ins);
8331 start_new_bblock = 1;
8339 * Sequence points are points where the debugger can place a breakpoint.
8340 * Currently, we generate these automatically at points where the IL
8343 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8345 * Make methods interruptable at the beginning, and at the targets of
8346 * backward branches.
8347 * Also, do this at the start of every bblock in methods with clauses too,
8348 * to be able to handle instructions with inprecise control flow like
8350 * Backward branches are handled at the end of method-to-ir ().
8352 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8354 /* Avoid sequence points on empty IL like .volatile */
8355 // FIXME: Enable this
8356 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8357 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8358 if (sp != stack_start)
8359 ins->flags |= MONO_INST_NONEMPTY_STACK;
8360 MONO_ADD_INS (cfg->cbb, ins);
8363 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8366 bblock->real_offset = cfg->real_offset;
8368 if ((cfg->method == method) && cfg->coverage_info) {
8369 guint32 cil_offset = ip - header->code;
8370 cfg->coverage_info->data [cil_offset].cil_code = ip;
8372 /* TODO: Use an increment here */
8373 #if defined(TARGET_X86)
8374 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8375 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8377 MONO_ADD_INS (cfg->cbb, ins);
8379 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8380 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8384 if (cfg->verbose_level > 3)
8385 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8389 if (seq_points && !sym_seq_points && sp != stack_start) {
8391 * The C# compiler uses these nops to notify the JIT that it should
8392 * insert seq points.
8394 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8395 MONO_ADD_INS (cfg->cbb, ins);
8397 if (cfg->keep_cil_nops)
8398 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8400 MONO_INST_NEW (cfg, ins, OP_NOP);
8402 MONO_ADD_INS (bblock, ins);
8405 if (should_insert_brekpoint (cfg->method)) {
8406 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8408 MONO_INST_NEW (cfg, ins, OP_NOP);
8411 MONO_ADD_INS (bblock, ins);
8417 CHECK_STACK_OVF (1);
8418 n = (*ip)-CEE_LDARG_0;
8420 EMIT_NEW_ARGLOAD (cfg, ins, n);
8428 CHECK_STACK_OVF (1);
8429 n = (*ip)-CEE_LDLOC_0;
8431 EMIT_NEW_LOCLOAD (cfg, ins, n);
8440 n = (*ip)-CEE_STLOC_0;
8443 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8445 emit_stloc_ir (cfg, sp, header, n);
8452 CHECK_STACK_OVF (1);
8455 EMIT_NEW_ARGLOAD (cfg, ins, n);
8461 CHECK_STACK_OVF (1);
8464 NEW_ARGLOADA (cfg, ins, n);
8465 MONO_ADD_INS (cfg->cbb, ins);
8475 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8477 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8482 CHECK_STACK_OVF (1);
8485 EMIT_NEW_LOCLOAD (cfg, ins, n);
8489 case CEE_LDLOCA_S: {
8490 unsigned char *tmp_ip;
8492 CHECK_STACK_OVF (1);
8493 CHECK_LOCAL (ip [1]);
8495 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8501 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8510 CHECK_LOCAL (ip [1]);
8511 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8513 emit_stloc_ir (cfg, sp, header, ip [1]);
8518 CHECK_STACK_OVF (1);
8519 EMIT_NEW_PCONST (cfg, ins, NULL);
8520 ins->type = STACK_OBJ;
8525 CHECK_STACK_OVF (1);
8526 EMIT_NEW_ICONST (cfg, ins, -1);
8539 CHECK_STACK_OVF (1);
8540 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8546 CHECK_STACK_OVF (1);
8548 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8554 CHECK_STACK_OVF (1);
8555 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8561 CHECK_STACK_OVF (1);
8562 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8563 ins->type = STACK_I8;
8564 ins->dreg = alloc_dreg (cfg, STACK_I8);
8566 ins->inst_l = (gint64)read64 (ip);
8567 MONO_ADD_INS (bblock, ins);
8573 gboolean use_aotconst = FALSE;
8575 #ifdef TARGET_POWERPC
8576 /* FIXME: Clean this up */
8577 if (cfg->compile_aot)
8578 use_aotconst = TRUE;
8581 /* FIXME: we should really allocate this only late in the compilation process */
8582 f = mono_domain_alloc (cfg->domain, sizeof (float));
8584 CHECK_STACK_OVF (1);
8590 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8592 dreg = alloc_freg (cfg);
8593 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8594 ins->type = cfg->r4_stack_type;
8596 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8597 ins->type = cfg->r4_stack_type;
8598 ins->dreg = alloc_dreg (cfg, STACK_R8);
8600 MONO_ADD_INS (bblock, ins);
8610 gboolean use_aotconst = FALSE;
8612 #ifdef TARGET_POWERPC
8613 /* FIXME: Clean this up */
8614 if (cfg->compile_aot)
8615 use_aotconst = TRUE;
8618 /* FIXME: we should really allocate this only late in the compilation process */
8619 d = mono_domain_alloc (cfg->domain, sizeof (double));
8621 CHECK_STACK_OVF (1);
8627 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8629 dreg = alloc_freg (cfg);
8630 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8631 ins->type = STACK_R8;
8633 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8634 ins->type = STACK_R8;
8635 ins->dreg = alloc_dreg (cfg, STACK_R8);
8637 MONO_ADD_INS (bblock, ins);
8646 MonoInst *temp, *store;
8648 CHECK_STACK_OVF (1);
8652 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8653 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8655 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8658 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8671 if (sp [0]->type == STACK_R8)
8672 /* we need to pop the value from the x86 FP stack */
8673 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8679 INLINE_FAILURE ("jmp");
8680 GSHAREDVT_FAILURE (*ip);
8683 if (stack_start != sp)
8685 token = read32 (ip + 1);
8686 /* FIXME: check the signature matches */
8687 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8689 if (!cmethod || mono_loader_get_last_error ())
8692 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8693 GENERIC_SHARING_FAILURE (CEE_JMP);
8695 if (mono_security_cas_enabled ())
8696 CHECK_CFG_EXCEPTION;
8698 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8700 if (ARCH_HAVE_OP_TAIL_CALL) {
8701 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8704 /* Handle tail calls similarly to calls */
8705 n = fsig->param_count + fsig->hasthis;
8709 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8710 call->method = cmethod;
8711 call->tail_call = TRUE;
8712 call->signature = mono_method_signature (cmethod);
8713 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8714 call->inst.inst_p0 = cmethod;
8715 for (i = 0; i < n; ++i)
8716 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8718 mono_arch_emit_call (cfg, call);
8719 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8720 MONO_ADD_INS (bblock, (MonoInst*)call);
8722 for (i = 0; i < num_args; ++i)
8723 /* Prevent arguments from being optimized away */
8724 arg_array [i]->flags |= MONO_INST_VOLATILE;
8726 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8727 ins = (MonoInst*)call;
8728 ins->inst_p0 = cmethod;
8729 MONO_ADD_INS (bblock, ins);
8733 start_new_bblock = 1;
8738 MonoMethodSignature *fsig;
8741 token = read32 (ip + 1);
8745 //GSHAREDVT_FAILURE (*ip);
8750 fsig = mini_get_signature (method, token, generic_context);
8752 if (method->dynamic && fsig->pinvoke) {
8756 * This is a call through a function pointer using a pinvoke
8757 * signature. Have to create a wrapper and call that instead.
8758 * FIXME: This is very slow, need to create a wrapper at JIT time
8759 * instead based on the signature.
8761 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8762 EMIT_NEW_PCONST (cfg, args [1], fsig);
8764 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8767 n = fsig->param_count + fsig->hasthis;
8771 //g_assert (!virtual || fsig->hasthis);
8775 inline_costs += 10 * num_calls++;
8778 * Making generic calls out of gsharedvt methods.
8779 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8780 * patching gshared method addresses into a gsharedvt method.
8782 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8784 * We pass the address to the gsharedvt trampoline in the rgctx reg
8786 MonoInst *callee = addr;
8788 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8790 GSHAREDVT_FAILURE (*ip);
8792 addr = emit_get_rgctx_sig (cfg, context_used,
8793 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8794 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8798 /* Prevent inlining of methods with indirect calls */
8799 INLINE_FAILURE ("indirect call");
8801 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8806 * Instead of emitting an indirect call, emit a direct call
8807 * with the contents of the aotconst as the patch info.
8809 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8810 info_type = addr->inst_c1;
8811 info_data = addr->inst_p0;
8813 info_type = addr->inst_right->inst_c1;
8814 info_data = addr->inst_right->inst_left;
8817 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8818 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8823 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8827 /* End of call, INS should contain the result of the call, if any */
8829 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8831 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8834 CHECK_CFG_EXCEPTION;
8838 constrained_class = NULL;
8842 case CEE_CALLVIRT: {
8843 MonoInst *addr = NULL;
8844 MonoMethodSignature *fsig = NULL;
8846 int virtual = *ip == CEE_CALLVIRT;
8847 gboolean pass_imt_from_rgctx = FALSE;
8848 MonoInst *imt_arg = NULL;
8849 MonoInst *keep_this_alive = NULL;
8850 gboolean pass_vtable = FALSE;
8851 gboolean pass_mrgctx = FALSE;
8852 MonoInst *vtable_arg = NULL;
8853 gboolean check_this = FALSE;
8854 gboolean supported_tail_call = FALSE;
8855 gboolean tail_call = FALSE;
8856 gboolean need_seq_point = FALSE;
8857 guint32 call_opcode = *ip;
8858 gboolean emit_widen = TRUE;
8859 gboolean push_res = TRUE;
8860 gboolean skip_ret = FALSE;
8861 gboolean delegate_invoke = FALSE;
8862 gboolean direct_icall = FALSE;
8863 gboolean constrained_partial_call = FALSE;
8864 MonoMethod *cil_method;
8867 token = read32 (ip + 1);
8871 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8872 cil_method = cmethod;
8874 if (constrained_class) {
8875 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8876 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8877 g_assert (!cmethod->klass->valuetype);
8878 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8879 constrained_partial_call = TRUE;
8883 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8884 if (cfg->verbose_level > 2)
8885 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8886 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8887 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8888 cfg->generic_sharing_context)) {
8889 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8893 if (cfg->verbose_level > 2)
8894 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8896 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8898 * This is needed since get_method_constrained can't find
8899 * the method in klass representing a type var.
8900 * The type var is guaranteed to be a reference type in this
8903 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8904 g_assert (!cmethod->klass->valuetype);
8906 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8912 if (!cmethod || mono_loader_get_last_error ())
8914 if (!dont_verify && !cfg->skip_visibility) {
8915 MonoMethod *target_method = cil_method;
8916 if (method->is_inflated) {
8917 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8919 if (!mono_method_can_access_method (method_definition, target_method) &&
8920 !mono_method_can_access_method (method, cil_method))
8921 METHOD_ACCESS_FAILURE (method, cil_method);
8924 if (mono_security_core_clr_enabled ())
8925 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8927 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8928 /* MS.NET seems to silently convert this to a callvirt */
8933 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8934 * converts to a callvirt.
8936 * tests/bug-515884.il is an example of this behavior
8938 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8939 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8940 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8944 if (!cmethod->klass->inited)
8945 if (!mono_class_init (cmethod->klass))
8946 TYPE_LOAD_ERROR (cmethod->klass);
8948 fsig = mono_method_signature (cmethod);
8951 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8952 mini_class_is_system_array (cmethod->klass)) {
8953 array_rank = cmethod->klass->rank;
8954 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8955 direct_icall = TRUE;
8956 } else if (fsig->pinvoke) {
8957 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8958 check_for_pending_exc, cfg->compile_aot);
8959 fsig = mono_method_signature (wrapper);
8960 } else if (constrained_class) {
8962 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8966 mono_save_token_info (cfg, image, token, cil_method);
8968 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8969 need_seq_point = TRUE;
8971 /* Don't support calls made using type arguments for now */
8973 if (cfg->gsharedvt) {
8974 if (mini_is_gsharedvt_signature (cfg, fsig))
8975 GSHAREDVT_FAILURE (*ip);
8979 if (mono_security_cas_enabled ()) {
8980 if (check_linkdemand (cfg, method, cmethod))
8981 INLINE_FAILURE ("linkdemand");
8982 CHECK_CFG_EXCEPTION;
8985 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8986 g_assert_not_reached ();
8988 n = fsig->param_count + fsig->hasthis;
8990 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8993 if (!cfg->generic_sharing_context)
8994 g_assert (!mono_method_check_context_used (cmethod));
8998 //g_assert (!virtual || fsig->hasthis);
9002 if (constrained_class) {
9003 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
9004 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9005 /* The 'Own method' case below */
9006 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9007 /* 'The type parameter is instantiated as a reference type' case below. */
9009 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
9010 CHECK_CFG_EXCEPTION;
9017 * We have the `constrained.' prefix opcode.
9019 if (constrained_partial_call) {
9020 gboolean need_box = TRUE;
9023 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9024 * called method is not known at compile time either. The called method could end up being
9025 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9026 * to box the receiver.
9027 * A simple solution would be to box always and make a normal virtual call, but that would
9028 * be bad performance wise.
9030 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9032 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
9039 MonoBasicBlock *is_ref_bb, *end_bb;
9040 MonoInst *nonbox_call;
9043 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9045 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9046 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9048 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9050 NEW_BBLOCK (cfg, is_ref_bb);
9051 NEW_BBLOCK (cfg, end_bb);
9053 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
9055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9058 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9060 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9063 MONO_START_BB (cfg, is_ref_bb);
9064 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9065 ins->klass = constrained_class;
9066 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9067 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9069 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9071 MONO_START_BB (cfg, end_bb);
9074 nonbox_call->dreg = ins->dreg;
9076 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9077 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9078 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9081 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9083 * The type parameter is instantiated as a valuetype,
9084 * but that type doesn't override the method we're
9085 * calling, so we need to box `this'.
9087 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9088 ins->klass = constrained_class;
9089 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9090 CHECK_CFG_EXCEPTION;
9091 } else if (!constrained_class->valuetype) {
9092 int dreg = alloc_ireg_ref (cfg);
9095 * The type parameter is instantiated as a reference
9096 * type. We have a managed pointer on the stack, so
9097 * we need to dereference it here.
9099 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9100 ins->type = STACK_OBJ;
9103 if (cmethod->klass->valuetype) {
9106 /* Interface method */
9109 mono_class_setup_vtable (constrained_class);
9110 CHECK_TYPELOAD (constrained_class);
9111 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9113 TYPE_LOAD_ERROR (constrained_class);
9114 slot = mono_method_get_vtable_slot (cmethod);
9116 TYPE_LOAD_ERROR (cmethod->klass);
9117 cmethod = constrained_class->vtable [ioffset + slot];
9119 if (cmethod->klass == mono_defaults.enum_class) {
9120 /* Enum implements some interfaces, so treat this as the first case */
9121 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9122 ins->klass = constrained_class;
9123 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9124 CHECK_CFG_EXCEPTION;
9129 constrained_class = NULL;
9132 if (check_call_signature (cfg, fsig, sp))
9135 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
9136 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9137 delegate_invoke = TRUE;
9140 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9142 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9143 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9151 * If the callee is a shared method, then its static cctor
9152 * might not get called after the call was patched.
9154 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9155 emit_generic_class_init (cfg, cmethod->klass);
9156 CHECK_TYPELOAD (cmethod->klass);
9159 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9161 if (cfg->generic_sharing_context) {
9162 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9164 context_used = mini_method_check_context_used (cfg, cmethod);
9166 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9167 /* Generic method interface
9168 calls are resolved via a
9169 helper function and don't
9171 if (!cmethod_context || !cmethod_context->method_inst)
9172 pass_imt_from_rgctx = TRUE;
9176 * If a shared method calls another
9177 * shared method then the caller must
9178 * have a generic sharing context
9179 * because the magic trampoline
9180 * requires it. FIXME: We shouldn't
9181 * have to force the vtable/mrgctx
9182 * variable here. Instead there
9183 * should be a flag in the cfg to
9184 * request a generic sharing context.
9187 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9188 mono_get_vtable_var (cfg);
9193 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9195 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9197 CHECK_TYPELOAD (cmethod->klass);
9198 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9203 g_assert (!vtable_arg);
9205 if (!cfg->compile_aot) {
9207 * emit_get_rgctx_method () calls mono_class_vtable () so check
9208 * for type load errors before.
9210 mono_class_setup_vtable (cmethod->klass);
9211 CHECK_TYPELOAD (cmethod->klass);
9214 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9216 /* !marshalbyref is needed to properly handle generic methods + remoting */
9217 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9218 MONO_METHOD_IS_FINAL (cmethod)) &&
9219 !mono_class_is_marshalbyref (cmethod->klass)) {
9226 if (pass_imt_from_rgctx) {
9227 g_assert (!pass_vtable);
9229 imt_arg = emit_get_rgctx_method (cfg, context_used,
9230 cmethod, MONO_RGCTX_INFO_METHOD);
9234 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9236 /* Calling virtual generic methods */
9237 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9238 !(MONO_METHOD_IS_FINAL (cmethod) &&
9239 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9240 fsig->generic_param_count &&
9241 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9242 MonoInst *this_temp, *this_arg_temp, *store;
9243 MonoInst *iargs [4];
9244 gboolean use_imt = FALSE;
9246 g_assert (fsig->is_inflated);
9248 /* Prevent inlining of methods that contain indirect calls */
9249 INLINE_FAILURE ("virtual generic call");
9251 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9252 GSHAREDVT_FAILURE (*ip);
9254 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9255 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9260 g_assert (!imt_arg);
9262 g_assert (cmethod->is_inflated);
9263 imt_arg = emit_get_rgctx_method (cfg, context_used,
9264 cmethod, MONO_RGCTX_INFO_METHOD);
9265 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9267 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9268 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9269 MONO_ADD_INS (bblock, store);
9271 /* FIXME: This should be a managed pointer */
9272 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9274 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9275 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9276 cmethod, MONO_RGCTX_INFO_METHOD);
9277 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9278 addr = mono_emit_jit_icall (cfg,
9279 mono_helper_compile_generic_method, iargs);
9281 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9283 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9290 * Implement a workaround for the inherent races involved in locking:
9296 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9297 * try block, the Exit () won't be executed, see:
9298 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9299 * To work around this, we extend such try blocks to include the last x bytes
9300 * of the Monitor.Enter () call.
9302 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9303 MonoBasicBlock *tbb;
9305 GET_BBLOCK (cfg, tbb, ip + 5);
9307 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9308 * from Monitor.Enter like ArgumentNullException.
9310 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9311 /* Mark this bblock as needing to be extended */
9312 tbb->extend_try_block = TRUE;
9316 /* Conversion to a JIT intrinsic */
9317 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9319 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9320 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9327 if ((cfg->opt & MONO_OPT_INLINE) &&
9328 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9329 mono_method_check_inlining (cfg, cmethod)) {
9331 gboolean always = FALSE;
9333 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9334 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9335 /* Prevent inlining of methods that call wrappers */
9336 INLINE_FAILURE ("wrapper call");
9337 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9341 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9343 cfg->real_offset += 5;
9345 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9346 /* *sp is already set by inline_method */
9351 inline_costs += costs;
9357 /* Tail recursion elimination */
9358 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9359 gboolean has_vtargs = FALSE;
9362 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9363 INLINE_FAILURE ("tail call");
9365 /* keep it simple */
9366 for (i = fsig->param_count - 1; i >= 0; i--) {
9367 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9372 for (i = 0; i < n; ++i)
9373 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9374 MONO_INST_NEW (cfg, ins, OP_BR);
9375 MONO_ADD_INS (bblock, ins);
9376 tblock = start_bblock->out_bb [0];
9377 link_bblock (cfg, bblock, tblock);
9378 ins->inst_target_bb = tblock;
9379 start_new_bblock = 1;
9381 /* skip the CEE_RET, too */
9382 if (ip_in_bb (cfg, bblock, ip + 5))
9389 inline_costs += 10 * num_calls++;
9392 * Making generic calls out of gsharedvt methods.
9393 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9394 * patching gshared method addresses into a gsharedvt method.
9396 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9397 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9398 MonoRgctxInfoType info_type;
9401 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9402 //GSHAREDVT_FAILURE (*ip);
9403 // disable for possible remoting calls
9404 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9405 GSHAREDVT_FAILURE (*ip);
9406 if (fsig->generic_param_count) {
9407 /* virtual generic call */
9408 g_assert (!imt_arg);
9409 /* Same as the virtual generic case above */
9410 imt_arg = emit_get_rgctx_method (cfg, context_used,
9411 cmethod, MONO_RGCTX_INFO_METHOD);
9412 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9414 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9415 /* This can happen when we call a fully instantiated iface method */
9416 imt_arg = emit_get_rgctx_method (cfg, context_used,
9417 cmethod, MONO_RGCTX_INFO_METHOD);
9422 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9423 keep_this_alive = sp [0];
9425 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9426 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9428 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9429 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9431 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9435 /* Generic sharing */
9438 * Use this if the callee is gsharedvt sharable too, since
9439 * at runtime we might find an instantiation so the call cannot
9440 * be patched (the 'no_patch' code path in mini-trampolines.c).
9442 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9443 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9444 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9445 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9446 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9447 INLINE_FAILURE ("gshared");
9449 g_assert (cfg->generic_sharing_context && cmethod);
9453 * We are compiling a call to a
9454 * generic method from shared code,
9455 * which means that we have to look up
9456 * the method in the rgctx and do an
9460 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9462 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9463 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9467 /* Direct calls to icalls */
9469 MonoMethod *wrapper;
9472 /* Inline the wrapper */
9473 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9475 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9476 g_assert (costs > 0);
9477 cfg->real_offset += 5;
9479 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9480 /* *sp is already set by inline_method */
9485 inline_costs += costs;
9494 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9495 MonoInst *val = sp [fsig->param_count];
9497 if (val->type == STACK_OBJ) {
9498 MonoInst *iargs [2];
9503 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9506 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9507 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9508 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9509 emit_write_barrier (cfg, addr, val);
9510 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9511 GSHAREDVT_FAILURE (*ip);
9512 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9513 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9515 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9516 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9517 if (!cmethod->klass->element_class->valuetype && !readonly)
9518 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9519 CHECK_TYPELOAD (cmethod->klass);
9522 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9525 g_assert_not_reached ();
9532 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9536 /* Tail prefix / tail call optimization */
9538 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9539 /* FIXME: runtime generic context pointer for jumps? */
9540 /* FIXME: handle this for generic sharing eventually */
9541 if ((ins_flag & MONO_INST_TAILCALL) &&
9542 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9543 supported_tail_call = TRUE;
9545 if (supported_tail_call) {
9548 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9549 INLINE_FAILURE ("tail call");
9551 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9553 if (ARCH_HAVE_OP_TAIL_CALL) {
9554 /* Handle tail calls similarly to normal calls */
9557 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9559 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9560 call->tail_call = TRUE;
9561 call->method = cmethod;
9562 call->signature = mono_method_signature (cmethod);
9565 * We implement tail calls by storing the actual arguments into the
9566 * argument variables, then emitting a CEE_JMP.
9568 for (i = 0; i < n; ++i) {
9569 /* Prevent argument from being register allocated */
9570 arg_array [i]->flags |= MONO_INST_VOLATILE;
9571 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9573 ins = (MonoInst*)call;
9574 ins->inst_p0 = cmethod;
9575 ins->inst_p1 = arg_array [0];
9576 MONO_ADD_INS (bblock, ins);
9577 link_bblock (cfg, bblock, end_bblock);
9578 start_new_bblock = 1;
9580 // FIXME: Eliminate unreachable epilogs
9583 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9584 * only reachable from this call.
9586 GET_BBLOCK (cfg, tblock, ip + 5);
9587 if (tblock == bblock || tblock->in_count == 0)
9596 * Synchronized wrappers.
9597 * Its hard to determine where to replace a method with its synchronized
9598 * wrapper without causing an infinite recursion. The current solution is
9599 * to add the synchronized wrapper in the trampolines, and to
9600 * change the called method to a dummy wrapper, and resolve that wrapper
9601 * to the real method in mono_jit_compile_method ().
9603 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9604 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9605 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9606 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9610 INLINE_FAILURE ("call");
9611 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9612 imt_arg, vtable_arg);
9615 link_bblock (cfg, bblock, end_bblock);
9616 start_new_bblock = 1;
9618 // FIXME: Eliminate unreachable epilogs
9621 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9622 * only reachable from this call.
9624 GET_BBLOCK (cfg, tblock, ip + 5);
9625 if (tblock == bblock || tblock->in_count == 0)
9632 /* End of call, INS should contain the result of the call, if any */
9634 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9637 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9642 if (keep_this_alive) {
9643 MonoInst *dummy_use;
9645 /* See mono_emit_method_call_full () */
9646 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9649 CHECK_CFG_EXCEPTION;
9653 g_assert (*ip == CEE_RET);
9657 constrained_class = NULL;
9659 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9663 if (cfg->method != method) {
9664 /* return from inlined method */
9666 * If in_count == 0, that means the ret is unreachable due to
9667 * being preceeded by a throw. In that case, inline_method () will
9668 * handle setting the return value
9669 * (test case: test_0_inline_throw ()).
9671 if (return_var && cfg->cbb->in_count) {
9672 MonoType *ret_type = mono_method_signature (method)->ret;
9678 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9681 //g_assert (returnvar != -1);
9682 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9683 cfg->ret_var_set = TRUE;
9686 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9688 if (cfg->lmf_var && cfg->cbb->in_count)
9692 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9694 if (seq_points && !sym_seq_points) {
9696 * Place a seq point here too even through the IL stack is not
9697 * empty, so a step over on
9700 * will work correctly.
9702 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9703 MONO_ADD_INS (cfg->cbb, ins);
9706 g_assert (!return_var);
9710 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9713 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9716 if (!cfg->vret_addr) {
9719 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9721 EMIT_NEW_RETLOADA (cfg, ret_addr);
9723 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9724 ins->klass = mono_class_from_mono_type (ret_type);
9727 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9728 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9729 MonoInst *iargs [1];
9733 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9734 mono_arch_emit_setret (cfg, method, conv);
9736 mono_arch_emit_setret (cfg, method, *sp);
9739 mono_arch_emit_setret (cfg, method, *sp);
9744 if (sp != stack_start)
9746 MONO_INST_NEW (cfg, ins, OP_BR);
9748 ins->inst_target_bb = end_bblock;
9749 MONO_ADD_INS (bblock, ins);
9750 link_bblock (cfg, bblock, end_bblock);
9751 start_new_bblock = 1;
9755 MONO_INST_NEW (cfg, ins, OP_BR);
9757 target = ip + 1 + (signed char)(*ip);
9759 GET_BBLOCK (cfg, tblock, target);
9760 link_bblock (cfg, bblock, tblock);
9761 ins->inst_target_bb = tblock;
9762 if (sp != stack_start) {
9763 handle_stack_args (cfg, stack_start, sp - stack_start);
9765 CHECK_UNVERIFIABLE (cfg);
9767 MONO_ADD_INS (bblock, ins);
9768 start_new_bblock = 1;
9769 inline_costs += BRANCH_COST;
9783 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9785 target = ip + 1 + *(signed char*)ip;
9791 inline_costs += BRANCH_COST;
9795 MONO_INST_NEW (cfg, ins, OP_BR);
9798 target = ip + 4 + (gint32)read32(ip);
9800 GET_BBLOCK (cfg, tblock, target);
9801 link_bblock (cfg, bblock, tblock);
9802 ins->inst_target_bb = tblock;
9803 if (sp != stack_start) {
9804 handle_stack_args (cfg, stack_start, sp - stack_start);
9806 CHECK_UNVERIFIABLE (cfg);
9809 MONO_ADD_INS (bblock, ins);
9811 start_new_bblock = 1;
9812 inline_costs += BRANCH_COST;
9819 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9820 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9821 guint32 opsize = is_short ? 1 : 4;
9823 CHECK_OPSIZE (opsize);
9825 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9828 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9833 GET_BBLOCK (cfg, tblock, target);
9834 link_bblock (cfg, bblock, tblock);
9835 GET_BBLOCK (cfg, tblock, ip);
9836 link_bblock (cfg, bblock, tblock);
9838 if (sp != stack_start) {
9839 handle_stack_args (cfg, stack_start, sp - stack_start);
9840 CHECK_UNVERIFIABLE (cfg);
9843 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9844 cmp->sreg1 = sp [0]->dreg;
9845 type_from_op (cfg, cmp, sp [0], NULL);
9848 #if SIZEOF_REGISTER == 4
9849 if (cmp->opcode == OP_LCOMPARE_IMM) {
9850 /* Convert it to OP_LCOMPARE */
9851 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9852 ins->type = STACK_I8;
9853 ins->dreg = alloc_dreg (cfg, STACK_I8);
9855 MONO_ADD_INS (bblock, ins);
9856 cmp->opcode = OP_LCOMPARE;
9857 cmp->sreg2 = ins->dreg;
9860 MONO_ADD_INS (bblock, cmp);
9862 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9863 type_from_op (cfg, ins, sp [0], NULL);
9864 MONO_ADD_INS (bblock, ins);
9865 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9866 GET_BBLOCK (cfg, tblock, target);
9867 ins->inst_true_bb = tblock;
9868 GET_BBLOCK (cfg, tblock, ip);
9869 ins->inst_false_bb = tblock;
9870 start_new_bblock = 2;
9873 inline_costs += BRANCH_COST;
9888 MONO_INST_NEW (cfg, ins, *ip);
9890 target = ip + 4 + (gint32)read32(ip);
9896 inline_costs += BRANCH_COST;
9900 MonoBasicBlock **targets;
9901 MonoBasicBlock *default_bblock;
9902 MonoJumpInfoBBTable *table;
9903 int offset_reg = alloc_preg (cfg);
9904 int target_reg = alloc_preg (cfg);
9905 int table_reg = alloc_preg (cfg);
9906 int sum_reg = alloc_preg (cfg);
9907 gboolean use_op_switch;
9911 n = read32 (ip + 1);
9914 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9918 CHECK_OPSIZE (n * sizeof (guint32));
9919 target = ip + n * sizeof (guint32);
9921 GET_BBLOCK (cfg, default_bblock, target);
9922 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9924 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9925 for (i = 0; i < n; ++i) {
9926 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9927 targets [i] = tblock;
9928 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9932 if (sp != stack_start) {
9934 * Link the current bb with the targets as well, so handle_stack_args
9935 * will set their in_stack correctly.
9937 link_bblock (cfg, bblock, default_bblock);
9938 for (i = 0; i < n; ++i)
9939 link_bblock (cfg, bblock, targets [i]);
9941 handle_stack_args (cfg, stack_start, sp - stack_start);
9943 CHECK_UNVERIFIABLE (cfg);
9946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9950 for (i = 0; i < n; ++i)
9951 link_bblock (cfg, bblock, targets [i]);
9953 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9954 table->table = targets;
9955 table->table_size = n;
9957 use_op_switch = FALSE;
9959 /* ARM implements SWITCH statements differently */
9960 /* FIXME: Make it use the generic implementation */
9961 if (!cfg->compile_aot)
9962 use_op_switch = TRUE;
9965 if (COMPILE_LLVM (cfg))
9966 use_op_switch = TRUE;
9968 cfg->cbb->has_jump_table = 1;
9970 if (use_op_switch) {
9971 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9972 ins->sreg1 = src1->dreg;
9973 ins->inst_p0 = table;
9974 ins->inst_many_bb = targets;
9975 ins->klass = GUINT_TO_POINTER (n);
9976 MONO_ADD_INS (cfg->cbb, ins);
9978 if (sizeof (gpointer) == 8)
9979 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9983 #if SIZEOF_REGISTER == 8
9984 /* The upper word might not be zero, and we add it to a 64 bit address later */
9985 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9988 if (cfg->compile_aot) {
9989 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9991 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9992 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9993 ins->inst_p0 = table;
9994 ins->dreg = table_reg;
9995 MONO_ADD_INS (cfg->cbb, ins);
9998 /* FIXME: Use load_memindex */
9999 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10001 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10003 start_new_bblock = 1;
10004 inline_costs += (BRANCH_COST * 2);
10017 case CEE_LDIND_REF:
10024 dreg = alloc_freg (cfg);
10027 dreg = alloc_lreg (cfg);
10029 case CEE_LDIND_REF:
10030 dreg = alloc_ireg_ref (cfg);
10033 dreg = alloc_preg (cfg);
10036 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10037 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10038 if (*ip == CEE_LDIND_R4)
10039 ins->type = cfg->r4_stack_type;
10040 ins->flags |= ins_flag;
10041 MONO_ADD_INS (bblock, ins);
10043 if (ins_flag & MONO_INST_VOLATILE) {
10044 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10045 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10050 case CEE_STIND_REF:
10061 if (ins_flag & MONO_INST_VOLATILE) {
10062 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10063 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10066 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10067 ins->flags |= ins_flag;
10070 MONO_ADD_INS (bblock, ins);
10072 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10073 emit_write_barrier (cfg, sp [0], sp [1]);
10082 MONO_INST_NEW (cfg, ins, (*ip));
10084 ins->sreg1 = sp [0]->dreg;
10085 ins->sreg2 = sp [1]->dreg;
10086 type_from_op (cfg, ins, sp [0], sp [1]);
10088 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10090 /* Use the immediate opcodes if possible */
10091 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10092 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10093 if (imm_opcode != -1) {
10094 ins->opcode = imm_opcode;
10095 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10098 NULLIFY_INS (sp [1]);
10102 MONO_ADD_INS ((cfg)->cbb, (ins));
10104 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10121 MONO_INST_NEW (cfg, ins, (*ip));
10123 ins->sreg1 = sp [0]->dreg;
10124 ins->sreg2 = sp [1]->dreg;
10125 type_from_op (cfg, ins, sp [0], sp [1]);
10127 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10128 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10130 /* FIXME: Pass opcode to is_inst_imm */
10132 /* Use the immediate opcodes if possible */
10133 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10136 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10137 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10138 /* Keep emulated opcodes which are optimized away later */
10139 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10140 imm_opcode = mono_op_to_op_imm (ins->opcode);
10143 if (imm_opcode != -1) {
10144 ins->opcode = imm_opcode;
10145 if (sp [1]->opcode == OP_I8CONST) {
10146 #if SIZEOF_REGISTER == 8
10147 ins->inst_imm = sp [1]->inst_l;
10149 ins->inst_ls_word = sp [1]->inst_ls_word;
10150 ins->inst_ms_word = sp [1]->inst_ms_word;
10154 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10157 /* Might be followed by an instruction added by add_widen_op */
10158 if (sp [1]->next == NULL)
10159 NULLIFY_INS (sp [1]);
10162 MONO_ADD_INS ((cfg)->cbb, (ins));
10164 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10177 case CEE_CONV_OVF_I8:
10178 case CEE_CONV_OVF_U8:
10179 case CEE_CONV_R_UN:
10182 /* Special case this earlier so we have long constants in the IR */
10183 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10184 int data = sp [-1]->inst_c0;
10185 sp [-1]->opcode = OP_I8CONST;
10186 sp [-1]->type = STACK_I8;
10187 #if SIZEOF_REGISTER == 8
10188 if ((*ip) == CEE_CONV_U8)
10189 sp [-1]->inst_c0 = (guint32)data;
10191 sp [-1]->inst_c0 = data;
10193 sp [-1]->inst_ls_word = data;
10194 if ((*ip) == CEE_CONV_U8)
10195 sp [-1]->inst_ms_word = 0;
10197 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10199 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10206 case CEE_CONV_OVF_I4:
10207 case CEE_CONV_OVF_I1:
10208 case CEE_CONV_OVF_I2:
10209 case CEE_CONV_OVF_I:
10210 case CEE_CONV_OVF_U:
10213 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10214 ADD_UNOP (CEE_CONV_OVF_I8);
10221 case CEE_CONV_OVF_U1:
10222 case CEE_CONV_OVF_U2:
10223 case CEE_CONV_OVF_U4:
10226 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10227 ADD_UNOP (CEE_CONV_OVF_U8);
10234 case CEE_CONV_OVF_I1_UN:
10235 case CEE_CONV_OVF_I2_UN:
10236 case CEE_CONV_OVF_I4_UN:
10237 case CEE_CONV_OVF_I8_UN:
10238 case CEE_CONV_OVF_U1_UN:
10239 case CEE_CONV_OVF_U2_UN:
10240 case CEE_CONV_OVF_U4_UN:
10241 case CEE_CONV_OVF_U8_UN:
10242 case CEE_CONV_OVF_I_UN:
10243 case CEE_CONV_OVF_U_UN:
10250 CHECK_CFG_EXCEPTION;
10254 case CEE_ADD_OVF_UN:
10256 case CEE_MUL_OVF_UN:
10258 case CEE_SUB_OVF_UN:
10264 GSHAREDVT_FAILURE (*ip);
10267 token = read32 (ip + 1);
10268 klass = mini_get_class (method, token, generic_context);
10269 CHECK_TYPELOAD (klass);
10271 if (generic_class_is_reference_type (cfg, klass)) {
10272 MonoInst *store, *load;
10273 int dreg = alloc_ireg_ref (cfg);
10275 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10276 load->flags |= ins_flag;
10277 MONO_ADD_INS (cfg->cbb, load);
10279 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10280 store->flags |= ins_flag;
10281 MONO_ADD_INS (cfg->cbb, store);
10283 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10284 emit_write_barrier (cfg, sp [0], sp [1]);
10286 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10292 int loc_index = -1;
10298 token = read32 (ip + 1);
10299 klass = mini_get_class (method, token, generic_context);
10300 CHECK_TYPELOAD (klass);
10302 /* Optimize the common ldobj+stloc combination */
10305 loc_index = ip [6];
10312 loc_index = ip [5] - CEE_STLOC_0;
10319 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10320 CHECK_LOCAL (loc_index);
10322 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10323 ins->dreg = cfg->locals [loc_index]->dreg;
10324 ins->flags |= ins_flag;
10327 if (ins_flag & MONO_INST_VOLATILE) {
10328 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10329 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10335 /* Optimize the ldobj+stobj combination */
10336 /* The reference case ends up being a load+store anyway */
10337 /* Skip this if the operation is volatile. */
10338 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10343 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10350 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10351 ins->flags |= ins_flag;
10354 if (ins_flag & MONO_INST_VOLATILE) {
10355 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10356 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10365 CHECK_STACK_OVF (1);
10367 n = read32 (ip + 1);
10369 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10370 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10371 ins->type = STACK_OBJ;
10374 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10375 MonoInst *iargs [1];
10376 char *str = mono_method_get_wrapper_data (method, n);
10378 if (cfg->compile_aot)
10379 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10381 EMIT_NEW_PCONST (cfg, iargs [0], str);
10382 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10384 if (cfg->opt & MONO_OPT_SHARED) {
10385 MonoInst *iargs [3];
10387 if (cfg->compile_aot) {
10388 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10390 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10391 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10392 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10393 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10394 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10396 if (bblock->out_of_line) {
10397 MonoInst *iargs [2];
10399 if (image == mono_defaults.corlib) {
10401 * Avoid relocations in AOT and save some space by using a
10402 * version of helper_ldstr specialized to mscorlib.
10404 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10405 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10407 /* Avoid creating the string object */
10408 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10409 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10410 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10414 if (cfg->compile_aot) {
10415 NEW_LDSTRCONST (cfg, ins, image, n);
10417 MONO_ADD_INS (bblock, ins);
10420 NEW_PCONST (cfg, ins, NULL);
10421 ins->type = STACK_OBJ;
10422 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10424 OUT_OF_MEMORY_FAILURE;
10427 MONO_ADD_INS (bblock, ins);
10436 MonoInst *iargs [2];
10437 MonoMethodSignature *fsig;
10440 MonoInst *vtable_arg = NULL;
10443 token = read32 (ip + 1);
10444 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10445 if (!cmethod || mono_loader_get_last_error ())
10447 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10450 mono_save_token_info (cfg, image, token, cmethod);
10452 if (!mono_class_init (cmethod->klass))
10453 TYPE_LOAD_ERROR (cmethod->klass);
10455 context_used = mini_method_check_context_used (cfg, cmethod);
10457 if (mono_security_cas_enabled ()) {
10458 if (check_linkdemand (cfg, method, cmethod))
10459 INLINE_FAILURE ("linkdemand");
10460 CHECK_CFG_EXCEPTION;
10461 } else if (mono_security_core_clr_enabled ()) {
10462 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10465 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10466 emit_generic_class_init (cfg, cmethod->klass);
10467 CHECK_TYPELOAD (cmethod->klass);
10471 if (cfg->gsharedvt) {
10472 if (mini_is_gsharedvt_variable_signature (sig))
10473 GSHAREDVT_FAILURE (*ip);
10477 n = fsig->param_count;
10481 * Generate smaller code for the common newobj <exception> instruction in
10482 * argument checking code.
10484 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10485 is_exception_class (cmethod->klass) && n <= 2 &&
10486 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10487 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10488 MonoInst *iargs [3];
10492 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10495 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10498 iargs [1] = sp [0];
10499 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10502 iargs [1] = sp [0];
10503 iargs [2] = sp [1];
10504 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10507 g_assert_not_reached ();
10515 /* move the args to allow room for 'this' in the first position */
10521 /* check_call_signature () requires sp[0] to be set */
10522 this_ins.type = STACK_OBJ;
10523 sp [0] = &this_ins;
10524 if (check_call_signature (cfg, fsig, sp))
10529 if (mini_class_is_system_array (cmethod->klass)) {
10530 *sp = emit_get_rgctx_method (cfg, context_used,
10531 cmethod, MONO_RGCTX_INFO_METHOD);
10533 /* Avoid varargs in the common case */
10534 if (fsig->param_count == 1)
10535 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10536 else if (fsig->param_count == 2)
10537 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10538 else if (fsig->param_count == 3)
10539 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10540 else if (fsig->param_count == 4)
10541 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10543 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10544 } else if (cmethod->string_ctor) {
10545 g_assert (!context_used);
10546 g_assert (!vtable_arg);
10547 /* we simply pass a null pointer */
10548 EMIT_NEW_PCONST (cfg, *sp, NULL);
10549 /* now call the string ctor */
10550 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10552 if (cmethod->klass->valuetype) {
10553 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10554 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10555 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10560 * The code generated by mini_emit_virtual_call () expects
10561 * iargs [0] to be a boxed instance, but luckily the vcall
10562 * will be transformed into a normal call there.
10564 } else if (context_used) {
10565 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10568 MonoVTable *vtable = NULL;
10570 if (!cfg->compile_aot)
10571 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10572 CHECK_TYPELOAD (cmethod->klass);
10575 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10576 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10577 * As a workaround, we call class cctors before allocating objects.
10579 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10580 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10581 if (cfg->verbose_level > 2)
10582 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10583 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10586 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10589 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10592 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10594 /* Now call the actual ctor */
10595 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10596 CHECK_CFG_EXCEPTION;
10599 if (alloc == NULL) {
10601 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10602 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10610 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10611 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10614 case CEE_CASTCLASS:
10618 token = read32 (ip + 1);
10619 klass = mini_get_class (method, token, generic_context);
10620 CHECK_TYPELOAD (klass);
10621 if (sp [0]->type != STACK_OBJ)
10624 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10625 CHECK_CFG_EXCEPTION;
10634 token = read32 (ip + 1);
10635 klass = mini_get_class (method, token, generic_context);
10636 CHECK_TYPELOAD (klass);
10637 if (sp [0]->type != STACK_OBJ)
10640 context_used = mini_class_check_context_used (cfg, klass);
10642 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10643 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10644 MonoInst *args [3];
10651 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10654 if (cfg->compile_aot) {
10655 idx = get_castclass_cache_idx (cfg);
10656 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10658 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10661 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10664 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10665 MonoMethod *mono_isinst;
10666 MonoInst *iargs [1];
10669 mono_isinst = mono_marshal_get_isinst (klass);
10670 iargs [0] = sp [0];
10672 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10673 iargs, ip, cfg->real_offset, TRUE, &bblock);
10674 CHECK_CFG_EXCEPTION;
10675 g_assert (costs > 0);
10678 cfg->real_offset += 5;
10682 inline_costs += costs;
10685 ins = handle_isinst (cfg, klass, *sp, context_used);
10686 CHECK_CFG_EXCEPTION;
10693 case CEE_UNBOX_ANY: {
10694 MonoInst *res, *addr;
10699 token = read32 (ip + 1);
10700 klass = mini_get_class (method, token, generic_context);
10701 CHECK_TYPELOAD (klass);
10703 mono_save_token_info (cfg, image, token, klass);
10705 context_used = mini_class_check_context_used (cfg, klass);
10707 if (mini_is_gsharedvt_klass (cfg, klass)) {
10708 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10710 } else if (generic_class_is_reference_type (cfg, klass)) {
10711 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10712 CHECK_CFG_EXCEPTION;
10713 } else if (mono_class_is_nullable (klass)) {
10714 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10716 addr = handle_unbox (cfg, klass, sp, context_used);
10718 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10729 MonoClass *enum_class;
10730 MonoMethod *has_flag;
10736 token = read32 (ip + 1);
10737 klass = mini_get_class (method, token, generic_context);
10738 CHECK_TYPELOAD (klass);
10740 mono_save_token_info (cfg, image, token, klass);
10742 context_used = mini_class_check_context_used (cfg, klass);
10744 if (generic_class_is_reference_type (cfg, klass)) {
10750 if (klass == mono_defaults.void_class)
10752 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10754 /* frequent check in generic code: box (struct), brtrue */
10759 * <push int/long ptr>
10762 * constrained. MyFlags
10763 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10765 * If we find this sequence and the operand types on box and constrained
10766 * are equal, we can emit a specialized instruction sequence instead of
10767 * the very slow HasFlag () call.
10769 if ((cfg->opt & MONO_OPT_INTRINS) &&
10770 /* Cheap checks first. */
10771 ip + 5 + 6 + 5 < end &&
10772 ip [5] == CEE_PREFIX1 &&
10773 ip [6] == CEE_CONSTRAINED_ &&
10774 ip [11] == CEE_CALLVIRT &&
10775 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10776 mono_class_is_enum (klass) &&
10777 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10778 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10779 has_flag->klass == mono_defaults.enum_class &&
10780 !strcmp (has_flag->name, "HasFlag") &&
10781 has_flag->signature->hasthis &&
10782 has_flag->signature->param_count == 1) {
10783 CHECK_TYPELOAD (enum_class);
10785 if (enum_class == klass) {
10786 MonoInst *enum_this, *enum_flag;
10791 enum_this = sp [0];
10792 enum_flag = sp [1];
10794 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10799 // FIXME: LLVM can't handle the inconsistent bb linking
10800 if (!mono_class_is_nullable (klass) &&
10801 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10802 (ip [5] == CEE_BRTRUE ||
10803 ip [5] == CEE_BRTRUE_S ||
10804 ip [5] == CEE_BRFALSE ||
10805 ip [5] == CEE_BRFALSE_S)) {
10806 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10808 MonoBasicBlock *true_bb, *false_bb;
10812 if (cfg->verbose_level > 3) {
10813 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10814 printf ("<box+brtrue opt>\n");
10819 case CEE_BRFALSE_S:
10822 target = ip + 1 + (signed char)(*ip);
10829 target = ip + 4 + (gint)(read32 (ip));
10833 g_assert_not_reached ();
10837 * We need to link both bblocks, since it is needed for handling stack
10838 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10839 * Branching to only one of them would lead to inconsistencies, so
10840 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10842 GET_BBLOCK (cfg, true_bb, target);
10843 GET_BBLOCK (cfg, false_bb, ip);
10845 mono_link_bblock (cfg, cfg->cbb, true_bb);
10846 mono_link_bblock (cfg, cfg->cbb, false_bb);
10848 if (sp != stack_start) {
10849 handle_stack_args (cfg, stack_start, sp - stack_start);
10851 CHECK_UNVERIFIABLE (cfg);
10854 if (COMPILE_LLVM (cfg)) {
10855 dreg = alloc_ireg (cfg);
10856 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10857 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10859 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10861 /* The JIT can't eliminate the iconst+compare */
10862 MONO_INST_NEW (cfg, ins, OP_BR);
10863 ins->inst_target_bb = is_true ? true_bb : false_bb;
10864 MONO_ADD_INS (cfg->cbb, ins);
10867 start_new_bblock = 1;
10871 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10873 CHECK_CFG_EXCEPTION;
10882 token = read32 (ip + 1);
10883 klass = mini_get_class (method, token, generic_context);
10884 CHECK_TYPELOAD (klass);
10886 mono_save_token_info (cfg, image, token, klass);
10888 context_used = mini_class_check_context_used (cfg, klass);
10890 if (mono_class_is_nullable (klass)) {
10893 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10894 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10898 ins = handle_unbox (cfg, klass, sp, context_used);
10911 MonoClassField *field;
10912 #ifndef DISABLE_REMOTING
10916 gboolean is_instance;
10918 gpointer addr = NULL;
10919 gboolean is_special_static;
10921 MonoInst *store_val = NULL;
10922 MonoInst *thread_ins;
10925 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10927 if (op == CEE_STFLD) {
10930 store_val = sp [1];
10935 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10937 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10940 if (op == CEE_STSFLD) {
10943 store_val = sp [0];
10948 token = read32 (ip + 1);
10949 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10950 field = mono_method_get_wrapper_data (method, token);
10951 klass = field->parent;
10954 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10957 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10958 FIELD_ACCESS_FAILURE (method, field);
10959 mono_class_init (klass);
10961 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10964 /* if the class is Critical then transparent code cannot access its fields */
10965 if (!is_instance && mono_security_core_clr_enabled ())
10966 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10968 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10969 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10970 if (mono_security_core_clr_enabled ())
10971 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10975 * LDFLD etc. is usable on static fields as well, so convert those cases to
10978 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10990 g_assert_not_reached ();
10992 is_instance = FALSE;
10995 context_used = mini_class_check_context_used (cfg, klass);
10997 /* INSTANCE CASE */
10999 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11000 if (op == CEE_STFLD) {
11001 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11003 #ifndef DISABLE_REMOTING
11004 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11005 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11006 MonoInst *iargs [5];
11008 GSHAREDVT_FAILURE (op);
11010 iargs [0] = sp [0];
11011 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11012 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11013 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11015 iargs [4] = sp [1];
11017 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11018 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11019 iargs, ip, cfg->real_offset, TRUE, &bblock);
11020 CHECK_CFG_EXCEPTION;
11021 g_assert (costs > 0);
11023 cfg->real_offset += 5;
11025 inline_costs += costs;
11027 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11034 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11036 if (mini_is_gsharedvt_klass (cfg, klass)) {
11037 MonoInst *offset_ins;
11039 context_used = mini_class_check_context_used (cfg, klass);
11041 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11042 dreg = alloc_ireg_mp (cfg);
11043 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11044 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11045 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11047 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11049 if (sp [0]->opcode != OP_LDADDR)
11050 store->flags |= MONO_INST_FAULT;
11052 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11053 /* insert call to write barrier */
11057 dreg = alloc_ireg_mp (cfg);
11058 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11059 emit_write_barrier (cfg, ptr, sp [1]);
11062 store->flags |= ins_flag;
11069 #ifndef DISABLE_REMOTING
11070 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11071 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11072 MonoInst *iargs [4];
11074 GSHAREDVT_FAILURE (op);
11076 iargs [0] = sp [0];
11077 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11078 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11079 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11080 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11081 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11082 iargs, ip, cfg->real_offset, TRUE, &bblock);
11083 CHECK_CFG_EXCEPTION;
11084 g_assert (costs > 0);
11086 cfg->real_offset += 5;
11090 inline_costs += costs;
11092 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11098 if (sp [0]->type == STACK_VTYPE) {
11101 /* Have to compute the address of the variable */
11103 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11105 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11107 g_assert (var->klass == klass);
11109 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11113 if (op == CEE_LDFLDA) {
11114 if (is_magic_tls_access (field)) {
11115 GSHAREDVT_FAILURE (*ip);
11117 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
11119 if (sp [0]->type == STACK_OBJ) {
11120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11121 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11124 dreg = alloc_ireg_mp (cfg);
11126 if (mini_is_gsharedvt_klass (cfg, klass)) {
11127 MonoInst *offset_ins;
11129 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11130 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11132 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11134 ins->klass = mono_class_from_mono_type (field->type);
11135 ins->type = STACK_MP;
11141 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11143 if (mini_is_gsharedvt_klass (cfg, klass)) {
11144 MonoInst *offset_ins;
11146 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11147 dreg = alloc_ireg_mp (cfg);
11148 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11149 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11153 load->flags |= ins_flag;
11154 if (sp [0]->opcode != OP_LDADDR)
11155 load->flags |= MONO_INST_FAULT;
11169 * We can only support shared generic static
11170 * field access on architectures where the
11171 * trampoline code has been extended to handle
11172 * the generic class init.
11174 #ifndef MONO_ARCH_VTABLE_REG
11175 GENERIC_SHARING_FAILURE (op);
11178 context_used = mini_class_check_context_used (cfg, klass);
11180 ftype = mono_field_get_type (field);
11182 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11185 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11186 * to be called here.
11188 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11189 mono_class_vtable (cfg->domain, klass);
11190 CHECK_TYPELOAD (klass);
11192 mono_domain_lock (cfg->domain);
11193 if (cfg->domain->special_static_fields)
11194 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11195 mono_domain_unlock (cfg->domain);
11197 is_special_static = mono_class_field_is_special_static (field);
11199 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11200 thread_ins = mono_get_thread_intrinsic (cfg);
11204 /* Generate IR to compute the field address */
11205 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11207 * Fast access to TLS data
11208 * Inline version of get_thread_static_data () in
11212 int idx, static_data_reg, array_reg, dreg;
11214 GSHAREDVT_FAILURE (op);
11216 // offset &= 0x7fffffff;
11217 // idx = (offset >> 24) - 1;
11218 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11219 MONO_ADD_INS (cfg->cbb, thread_ins);
11220 static_data_reg = alloc_ireg (cfg);
11221 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11223 if (cfg->compile_aot) {
11224 int offset_reg, offset2_reg, idx_reg;
11226 /* For TLS variables, this will return the TLS offset */
11227 EMIT_NEW_SFLDACONST (cfg, ins, field);
11228 offset_reg = ins->dreg;
11229 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11230 idx_reg = alloc_ireg (cfg);
11231 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11233 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11234 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11235 array_reg = alloc_ireg (cfg);
11236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11237 offset2_reg = alloc_ireg (cfg);
11238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11239 dreg = alloc_ireg (cfg);
11240 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11242 offset = (gsize)addr & 0x7fffffff;
11243 idx = (offset >> 24) - 1;
11245 array_reg = alloc_ireg (cfg);
11246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11247 dreg = alloc_ireg (cfg);
11248 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11250 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11251 (cfg->compile_aot && is_special_static) ||
11252 (context_used && is_special_static)) {
11253 MonoInst *iargs [2];
11255 g_assert (field->parent);
11256 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11257 if (context_used) {
11258 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11259 field, MONO_RGCTX_INFO_CLASS_FIELD);
11261 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11263 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11264 } else if (context_used) {
11265 MonoInst *static_data;
11268 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11269 method->klass->name_space, method->klass->name, method->name,
11270 depth, field->offset);
11273 if (mono_class_needs_cctor_run (klass, method))
11274 emit_generic_class_init (cfg, klass);
11277 * The pointer we're computing here is
11279 * super_info.static_data + field->offset
11281 static_data = emit_get_rgctx_klass (cfg, context_used,
11282 klass, MONO_RGCTX_INFO_STATIC_DATA);
11284 if (mini_is_gsharedvt_klass (cfg, klass)) {
11285 MonoInst *offset_ins;
11287 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11288 dreg = alloc_ireg_mp (cfg);
11289 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11290 } else if (field->offset == 0) {
11293 int addr_reg = mono_alloc_preg (cfg);
11294 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11296 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11297 MonoInst *iargs [2];
11299 g_assert (field->parent);
11300 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11301 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11302 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11304 MonoVTable *vtable = NULL;
11306 if (!cfg->compile_aot)
11307 vtable = mono_class_vtable (cfg->domain, klass);
11308 CHECK_TYPELOAD (klass);
11311 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11312 if (!(g_slist_find (class_inits, klass))) {
11313 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11314 if (cfg->verbose_level > 2)
11315 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11316 class_inits = g_slist_prepend (class_inits, klass);
11319 if (cfg->run_cctors) {
11321 /* This makes so that inline cannot trigger */
11322 /* .cctors: too many apps depend on them */
11323 /* running with a specific order... */
11325 if (! vtable->initialized)
11326 INLINE_FAILURE ("class init");
11327 ex = mono_runtime_class_init_full (vtable, FALSE);
11329 set_exception_object (cfg, ex);
11330 goto exception_exit;
11334 if (cfg->compile_aot)
11335 EMIT_NEW_SFLDACONST (cfg, ins, field);
11338 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11340 EMIT_NEW_PCONST (cfg, ins, addr);
11343 MonoInst *iargs [1];
11344 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11345 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11349 /* Generate IR to do the actual load/store operation */
11351 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11352 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11353 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11356 if (op == CEE_LDSFLDA) {
11357 ins->klass = mono_class_from_mono_type (ftype);
11358 ins->type = STACK_PTR;
11360 } else if (op == CEE_STSFLD) {
11363 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11364 store->flags |= ins_flag;
11366 gboolean is_const = FALSE;
11367 MonoVTable *vtable = NULL;
11368 gpointer addr = NULL;
11370 if (!context_used) {
11371 vtable = mono_class_vtable (cfg->domain, klass);
11372 CHECK_TYPELOAD (klass);
11374 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11375 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11376 int ro_type = ftype->type;
11378 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11379 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11380 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11383 GSHAREDVT_FAILURE (op);
11385 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11388 case MONO_TYPE_BOOLEAN:
11390 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11394 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11397 case MONO_TYPE_CHAR:
11399 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11403 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11408 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11412 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11417 case MONO_TYPE_PTR:
11418 case MONO_TYPE_FNPTR:
11419 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11420 type_to_eval_stack_type ((cfg), field->type, *sp);
11423 case MONO_TYPE_STRING:
11424 case MONO_TYPE_OBJECT:
11425 case MONO_TYPE_CLASS:
11426 case MONO_TYPE_SZARRAY:
11427 case MONO_TYPE_ARRAY:
11428 if (!mono_gc_is_moving ()) {
11429 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11430 type_to_eval_stack_type ((cfg), field->type, *sp);
11438 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11443 case MONO_TYPE_VALUETYPE:
11453 CHECK_STACK_OVF (1);
11455 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11456 load->flags |= ins_flag;
11462 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11463 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11464 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11475 token = read32 (ip + 1);
11476 klass = mini_get_class (method, token, generic_context);
11477 CHECK_TYPELOAD (klass);
11478 if (ins_flag & MONO_INST_VOLATILE) {
11479 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11480 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11482 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11483 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11484 ins->flags |= ins_flag;
11485 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11486 generic_class_is_reference_type (cfg, klass)) {
11487 /* insert call to write barrier */
11488 emit_write_barrier (cfg, sp [0], sp [1]);
11500 const char *data_ptr;
11502 guint32 field_token;
11508 token = read32 (ip + 1);
11510 klass = mini_get_class (method, token, generic_context);
11511 CHECK_TYPELOAD (klass);
11513 context_used = mini_class_check_context_used (cfg, klass);
11515 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11516 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11517 ins->sreg1 = sp [0]->dreg;
11518 ins->type = STACK_I4;
11519 ins->dreg = alloc_ireg (cfg);
11520 MONO_ADD_INS (cfg->cbb, ins);
11521 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11524 if (context_used) {
11525 MonoInst *args [3];
11526 MonoClass *array_class = mono_array_class_get (klass, 1);
11527 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11529 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11532 args [0] = emit_get_rgctx_klass (cfg, context_used,
11533 array_class, MONO_RGCTX_INFO_VTABLE);
11538 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11540 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11542 if (cfg->opt & MONO_OPT_SHARED) {
11543 /* Decompose now to avoid problems with references to the domainvar */
11544 MonoInst *iargs [3];
11546 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11547 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11548 iargs [2] = sp [0];
11550 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11552 /* Decompose later since it is needed by abcrem */
11553 MonoClass *array_type = mono_array_class_get (klass, 1);
11554 mono_class_vtable (cfg->domain, array_type);
11555 CHECK_TYPELOAD (array_type);
11557 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11558 ins->dreg = alloc_ireg_ref (cfg);
11559 ins->sreg1 = sp [0]->dreg;
11560 ins->inst_newa_class = klass;
11561 ins->type = STACK_OBJ;
11562 ins->klass = array_type;
11563 MONO_ADD_INS (cfg->cbb, ins);
11564 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11565 cfg->cbb->has_array_access = TRUE;
11567 /* Needed so mono_emit_load_get_addr () gets called */
11568 mono_get_got_var (cfg);
11578 * we inline/optimize the initialization sequence if possible.
11579 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11580 * for small sizes open code the memcpy
11581 * ensure the rva field is big enough
11583 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11584 MonoMethod *memcpy_method = get_memcpy_method ();
11585 MonoInst *iargs [3];
11586 int add_reg = alloc_ireg_mp (cfg);
11588 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11589 if (cfg->compile_aot) {
11590 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11592 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11594 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11595 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11604 if (sp [0]->type != STACK_OBJ)
11607 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11608 ins->dreg = alloc_preg (cfg);
11609 ins->sreg1 = sp [0]->dreg;
11610 ins->type = STACK_I4;
11611 /* This flag will be inherited by the decomposition */
11612 ins->flags |= MONO_INST_FAULT;
11613 MONO_ADD_INS (cfg->cbb, ins);
11614 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11615 cfg->cbb->has_array_access = TRUE;
11623 if (sp [0]->type != STACK_OBJ)
11626 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11628 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11629 CHECK_TYPELOAD (klass);
11630 /* we need to make sure that this array is exactly the type it needs
11631 * to be for correctness. the wrappers are lax with their usage
11632 * so we need to ignore them here
11634 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11635 MonoClass *array_class = mono_array_class_get (klass, 1);
11636 mini_emit_check_array_type (cfg, sp [0], array_class);
11637 CHECK_TYPELOAD (array_class);
11641 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11646 case CEE_LDELEM_I1:
11647 case CEE_LDELEM_U1:
11648 case CEE_LDELEM_I2:
11649 case CEE_LDELEM_U2:
11650 case CEE_LDELEM_I4:
11651 case CEE_LDELEM_U4:
11652 case CEE_LDELEM_I8:
11654 case CEE_LDELEM_R4:
11655 case CEE_LDELEM_R8:
11656 case CEE_LDELEM_REF: {
11662 if (*ip == CEE_LDELEM) {
11664 token = read32 (ip + 1);
11665 klass = mini_get_class (method, token, generic_context);
11666 CHECK_TYPELOAD (klass);
11667 mono_class_init (klass);
11670 klass = array_access_to_klass (*ip);
11672 if (sp [0]->type != STACK_OBJ)
11675 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11677 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11678 // FIXME-VT: OP_ICONST optimization
11679 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11681 ins->opcode = OP_LOADV_MEMBASE;
11682 } else if (sp [1]->opcode == OP_ICONST) {
11683 int array_reg = sp [0]->dreg;
11684 int index_reg = sp [1]->dreg;
11685 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11687 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11688 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11690 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11691 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11694 if (*ip == CEE_LDELEM)
11701 case CEE_STELEM_I1:
11702 case CEE_STELEM_I2:
11703 case CEE_STELEM_I4:
11704 case CEE_STELEM_I8:
11705 case CEE_STELEM_R4:
11706 case CEE_STELEM_R8:
11707 case CEE_STELEM_REF:
11712 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11714 if (*ip == CEE_STELEM) {
11716 token = read32 (ip + 1);
11717 klass = mini_get_class (method, token, generic_context);
11718 CHECK_TYPELOAD (klass);
11719 mono_class_init (klass);
11722 klass = array_access_to_klass (*ip);
11724 if (sp [0]->type != STACK_OBJ)
11727 emit_array_store (cfg, klass, sp, TRUE);
11729 if (*ip == CEE_STELEM)
11736 case CEE_CKFINITE: {
11740 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11741 ins->sreg1 = sp [0]->dreg;
11742 ins->dreg = alloc_freg (cfg);
11743 ins->type = STACK_R8;
11744 MONO_ADD_INS (bblock, ins);
11746 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11751 case CEE_REFANYVAL: {
11752 MonoInst *src_var, *src;
11754 int klass_reg = alloc_preg (cfg);
11755 int dreg = alloc_preg (cfg);
11757 GSHAREDVT_FAILURE (*ip);
11760 MONO_INST_NEW (cfg, ins, *ip);
11763 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11764 CHECK_TYPELOAD (klass);
11766 context_used = mini_class_check_context_used (cfg, klass);
11769 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11771 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11772 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11773 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11775 if (context_used) {
11776 MonoInst *klass_ins;
11778 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11779 klass, MONO_RGCTX_INFO_KLASS);
11782 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11783 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11785 mini_emit_class_check (cfg, klass_reg, klass);
11787 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11788 ins->type = STACK_MP;
11793 case CEE_MKREFANY: {
11794 MonoInst *loc, *addr;
11796 GSHAREDVT_FAILURE (*ip);
11799 MONO_INST_NEW (cfg, ins, *ip);
11802 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11803 CHECK_TYPELOAD (klass);
11805 context_used = mini_class_check_context_used (cfg, klass);
11807 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11808 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11810 if (context_used) {
11811 MonoInst *const_ins;
11812 int type_reg = alloc_preg (cfg);
11814 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11815 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11818 } else if (cfg->compile_aot) {
11819 int const_reg = alloc_preg (cfg);
11820 int type_reg = alloc_preg (cfg);
11822 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11823 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11827 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11828 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11832 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11833 ins->type = STACK_VTYPE;
11834 ins->klass = mono_defaults.typed_reference_class;
11839 case CEE_LDTOKEN: {
11841 MonoClass *handle_class;
11843 CHECK_STACK_OVF (1);
11846 n = read32 (ip + 1);
11848 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11849 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11850 handle = mono_method_get_wrapper_data (method, n);
11851 handle_class = mono_method_get_wrapper_data (method, n + 1);
11852 if (handle_class == mono_defaults.typehandle_class)
11853 handle = &((MonoClass*)handle)->byval_arg;
11856 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11861 mono_class_init (handle_class);
11862 if (cfg->generic_sharing_context) {
11863 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11864 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11865 /* This case handles ldtoken
11866 of an open type, like for
11869 } else if (handle_class == mono_defaults.typehandle_class) {
11870 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11871 } else if (handle_class == mono_defaults.fieldhandle_class)
11872 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11873 else if (handle_class == mono_defaults.methodhandle_class)
11874 context_used = mini_method_check_context_used (cfg, handle);
11876 g_assert_not_reached ();
11879 if ((cfg->opt & MONO_OPT_SHARED) &&
11880 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11881 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11882 MonoInst *addr, *vtvar, *iargs [3];
11883 int method_context_used;
11885 method_context_used = mini_method_check_context_used (cfg, method);
11887 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11889 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11890 EMIT_NEW_ICONST (cfg, iargs [1], n);
11891 if (method_context_used) {
11892 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11893 method, MONO_RGCTX_INFO_METHOD);
11894 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11896 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11897 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11899 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11903 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11905 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11906 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11907 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11908 (cmethod->klass == mono_defaults.systemtype_class) &&
11909 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11910 MonoClass *tclass = mono_class_from_mono_type (handle);
11912 mono_class_init (tclass);
11913 if (context_used) {
11914 ins = emit_get_rgctx_klass (cfg, context_used,
11915 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11916 } else if (cfg->compile_aot) {
11917 if (method->wrapper_type) {
11918 mono_error_init (&error); //got to do it since there are multiple conditionals below
11919 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11920 /* Special case for static synchronized wrappers */
11921 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11923 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11924 /* FIXME: n is not a normal token */
11926 EMIT_NEW_PCONST (cfg, ins, NULL);
11929 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11932 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11934 ins->type = STACK_OBJ;
11935 ins->klass = cmethod->klass;
11938 MonoInst *addr, *vtvar;
11940 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11942 if (context_used) {
11943 if (handle_class == mono_defaults.typehandle_class) {
11944 ins = emit_get_rgctx_klass (cfg, context_used,
11945 mono_class_from_mono_type (handle),
11946 MONO_RGCTX_INFO_TYPE);
11947 } else if (handle_class == mono_defaults.methodhandle_class) {
11948 ins = emit_get_rgctx_method (cfg, context_used,
11949 handle, MONO_RGCTX_INFO_METHOD);
11950 } else if (handle_class == mono_defaults.fieldhandle_class) {
11951 ins = emit_get_rgctx_field (cfg, context_used,
11952 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11954 g_assert_not_reached ();
11956 } else if (cfg->compile_aot) {
11957 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11959 EMIT_NEW_PCONST (cfg, ins, handle);
11961 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11963 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11973 MONO_INST_NEW (cfg, ins, OP_THROW);
11975 ins->sreg1 = sp [0]->dreg;
11977 bblock->out_of_line = TRUE;
11978 MONO_ADD_INS (bblock, ins);
11979 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11980 MONO_ADD_INS (bblock, ins);
11983 link_bblock (cfg, bblock, end_bblock);
11984 start_new_bblock = 1;
11986 case CEE_ENDFINALLY:
11987 /* mono_save_seq_point_info () depends on this */
11988 if (sp != stack_start)
11989 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11990 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11991 MONO_ADD_INS (bblock, ins);
11993 start_new_bblock = 1;
11996 * Control will leave the method so empty the stack, otherwise
11997 * the next basic block will start with a nonempty stack.
11999 while (sp != stack_start) {
12004 case CEE_LEAVE_S: {
12007 if (*ip == CEE_LEAVE) {
12009 target = ip + 5 + (gint32)read32(ip + 1);
12012 target = ip + 2 + (signed char)(ip [1]);
12015 /* empty the stack */
12016 while (sp != stack_start) {
12021 * If this leave statement is in a catch block, check for a
12022 * pending exception, and rethrow it if necessary.
12023 * We avoid doing this in runtime invoke wrappers, since those are called
12024 * by native code which excepts the wrapper to catch all exceptions.
12026 for (i = 0; i < header->num_clauses; ++i) {
12027 MonoExceptionClause *clause = &header->clauses [i];
12030 * Use <= in the final comparison to handle clauses with multiple
12031 * leave statements, like in bug #78024.
12032 * The ordering of the exception clauses guarantees that we find the
12033 * innermost clause.
12035 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12037 MonoBasicBlock *dont_throw;
12042 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12045 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12047 NEW_BBLOCK (cfg, dont_throw);
12050 * Currently, we always rethrow the abort exception, despite the
12051 * fact that this is not correct. See thread6.cs for an example.
12052 * But propagating the abort exception is more important than
12053 * getting the sematics right.
12055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12057 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12059 MONO_START_BB (cfg, dont_throw);
12064 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12066 MonoExceptionClause *clause;
12068 for (tmp = handlers; tmp; tmp = tmp->next) {
12069 clause = tmp->data;
12070 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12072 link_bblock (cfg, bblock, tblock);
12073 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12074 ins->inst_target_bb = tblock;
12075 ins->inst_eh_block = clause;
12076 MONO_ADD_INS (bblock, ins);
12077 bblock->has_call_handler = 1;
12078 if (COMPILE_LLVM (cfg)) {
12079 MonoBasicBlock *target_bb;
12082 * Link the finally bblock with the target, since it will
12083 * conceptually branch there.
12084 * FIXME: Have to link the bblock containing the endfinally.
12086 GET_BBLOCK (cfg, target_bb, target);
12087 link_bblock (cfg, tblock, target_bb);
12090 g_list_free (handlers);
12093 MONO_INST_NEW (cfg, ins, OP_BR);
12094 MONO_ADD_INS (bblock, ins);
12095 GET_BBLOCK (cfg, tblock, target);
12096 link_bblock (cfg, bblock, tblock);
12097 ins->inst_target_bb = tblock;
12098 start_new_bblock = 1;
12100 if (*ip == CEE_LEAVE)
12109 * Mono specific opcodes
12111 case MONO_CUSTOM_PREFIX: {
12113 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12117 case CEE_MONO_ICALL: {
12119 MonoJitICallInfo *info;
12121 token = read32 (ip + 2);
12122 func = mono_method_get_wrapper_data (method, token);
12123 info = mono_find_jit_icall_by_addr (func);
12125 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12128 CHECK_STACK (info->sig->param_count);
12129 sp -= info->sig->param_count;
12131 ins = mono_emit_jit_icall (cfg, info->func, sp);
12132 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12136 inline_costs += 10 * num_calls++;
12140 case CEE_MONO_LDPTR_CARD_TABLE: {
12142 gpointer card_mask;
12143 CHECK_STACK_OVF (1);
12145 if (cfg->compile_aot)
12146 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12148 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12152 inline_costs += 10 * num_calls++;
12155 case CEE_MONO_LDPTR_NURSERY_START: {
12158 CHECK_STACK_OVF (1);
12160 if (cfg->compile_aot)
12161 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12163 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12167 inline_costs += 10 * num_calls++;
12170 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12171 CHECK_STACK_OVF (1);
12173 if (cfg->compile_aot)
12174 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12176 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12180 inline_costs += 10 * num_calls++;
12183 case CEE_MONO_LDPTR: {
12186 CHECK_STACK_OVF (1);
12188 token = read32 (ip + 2);
12190 ptr = mono_method_get_wrapper_data (method, token);
12191 EMIT_NEW_PCONST (cfg, ins, ptr);
12194 inline_costs += 10 * num_calls++;
12195 /* Can't embed random pointers into AOT code */
12199 case CEE_MONO_JIT_ICALL_ADDR: {
12200 MonoJitICallInfo *callinfo;
12203 CHECK_STACK_OVF (1);
12205 token = read32 (ip + 2);
12207 ptr = mono_method_get_wrapper_data (method, token);
12208 callinfo = mono_find_jit_icall_by_addr (ptr);
12209 g_assert (callinfo);
12210 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12213 inline_costs += 10 * num_calls++;
12216 case CEE_MONO_ICALL_ADDR: {
12217 MonoMethod *cmethod;
12220 CHECK_STACK_OVF (1);
12222 token = read32 (ip + 2);
12224 cmethod = mono_method_get_wrapper_data (method, token);
12226 if (cfg->compile_aot) {
12227 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12229 ptr = mono_lookup_internal_call (cmethod);
12231 EMIT_NEW_PCONST (cfg, ins, ptr);
12237 case CEE_MONO_VTADDR: {
12238 MonoInst *src_var, *src;
12244 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12245 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12250 case CEE_MONO_NEWOBJ: {
12251 MonoInst *iargs [2];
12253 CHECK_STACK_OVF (1);
12255 token = read32 (ip + 2);
12256 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12257 mono_class_init (klass);
12258 NEW_DOMAINCONST (cfg, iargs [0]);
12259 MONO_ADD_INS (cfg->cbb, iargs [0]);
12260 NEW_CLASSCONST (cfg, iargs [1], klass);
12261 MONO_ADD_INS (cfg->cbb, iargs [1]);
12262 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12264 inline_costs += 10 * num_calls++;
12267 case CEE_MONO_OBJADDR:
12270 MONO_INST_NEW (cfg, ins, OP_MOVE);
12271 ins->dreg = alloc_ireg_mp (cfg);
12272 ins->sreg1 = sp [0]->dreg;
12273 ins->type = STACK_MP;
12274 MONO_ADD_INS (cfg->cbb, ins);
12278 case CEE_MONO_LDNATIVEOBJ:
12280 * Similar to LDOBJ, but instead load the unmanaged
12281 * representation of the vtype to the stack.
12286 token = read32 (ip + 2);
12287 klass = mono_method_get_wrapper_data (method, token);
12288 g_assert (klass->valuetype);
12289 mono_class_init (klass);
12292 MonoInst *src, *dest, *temp;
12295 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12296 temp->backend.is_pinvoke = 1;
12297 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12298 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12300 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12301 dest->type = STACK_VTYPE;
12302 dest->klass = klass;
12308 case CEE_MONO_RETOBJ: {
12310 * Same as RET, but return the native representation of a vtype
12313 g_assert (cfg->ret);
12314 g_assert (mono_method_signature (method)->pinvoke);
12319 token = read32 (ip + 2);
12320 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12322 if (!cfg->vret_addr) {
12323 g_assert (cfg->ret_var_is_local);
12325 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12327 EMIT_NEW_RETLOADA (cfg, ins);
12329 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12331 if (sp != stack_start)
12334 MONO_INST_NEW (cfg, ins, OP_BR);
12335 ins->inst_target_bb = end_bblock;
12336 MONO_ADD_INS (bblock, ins);
12337 link_bblock (cfg, bblock, end_bblock);
12338 start_new_bblock = 1;
12342 case CEE_MONO_CISINST:
12343 case CEE_MONO_CCASTCLASS: {
12348 token = read32 (ip + 2);
12349 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12350 if (ip [1] == CEE_MONO_CISINST)
12351 ins = handle_cisinst (cfg, klass, sp [0]);
12353 ins = handle_ccastclass (cfg, klass, sp [0]);
12359 case CEE_MONO_SAVE_LMF:
12360 case CEE_MONO_RESTORE_LMF:
12361 #ifdef MONO_ARCH_HAVE_LMF_OPS
12362 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12363 MONO_ADD_INS (bblock, ins);
12364 cfg->need_lmf_area = TRUE;
12368 case CEE_MONO_CLASSCONST:
12369 CHECK_STACK_OVF (1);
12371 token = read32 (ip + 2);
12372 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12375 inline_costs += 10 * num_calls++;
12377 case CEE_MONO_NOT_TAKEN:
12378 bblock->out_of_line = TRUE;
12381 case CEE_MONO_TLS: {
12384 CHECK_STACK_OVF (1);
12386 key = (gint32)read32 (ip + 2);
12387 g_assert (key < TLS_KEY_NUM);
12389 ins = mono_create_tls_get (cfg, key);
12391 if (cfg->compile_aot) {
12393 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12394 ins->dreg = alloc_preg (cfg);
12395 ins->type = STACK_PTR;
12397 g_assert_not_reached ();
12400 ins->type = STACK_PTR;
12401 MONO_ADD_INS (bblock, ins);
12406 case CEE_MONO_DYN_CALL: {
12407 MonoCallInst *call;
12409 /* It would be easier to call a trampoline, but that would put an
12410 * extra frame on the stack, confusing exception handling. So
12411 * implement it inline using an opcode for now.
12414 if (!cfg->dyn_call_var) {
12415 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12416 /* prevent it from being register allocated */
12417 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12420 /* Has to use a call inst since it local regalloc expects it */
12421 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12422 ins = (MonoInst*)call;
12424 ins->sreg1 = sp [0]->dreg;
12425 ins->sreg2 = sp [1]->dreg;
12426 MONO_ADD_INS (bblock, ins);
12428 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12431 inline_costs += 10 * num_calls++;
12435 case CEE_MONO_MEMORY_BARRIER: {
12437 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12441 case CEE_MONO_JIT_ATTACH: {
12442 MonoInst *args [16], *domain_ins;
12443 MonoInst *ad_ins, *jit_tls_ins;
12444 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12446 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12448 EMIT_NEW_PCONST (cfg, ins, NULL);
12449 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12451 ad_ins = mono_get_domain_intrinsic (cfg);
12452 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12454 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12455 NEW_BBLOCK (cfg, next_bb);
12456 NEW_BBLOCK (cfg, call_bb);
12458 if (cfg->compile_aot) {
12459 /* AOT code is only used in the root domain */
12460 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12462 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12464 MONO_ADD_INS (cfg->cbb, ad_ins);
12465 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12466 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12468 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12472 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12473 MONO_START_BB (cfg, call_bb);
12476 if (cfg->compile_aot) {
12477 /* AOT code is only used in the root domain */
12478 EMIT_NEW_PCONST (cfg, args [0], NULL);
12480 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12482 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12483 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12486 MONO_START_BB (cfg, next_bb);
12492 case CEE_MONO_JIT_DETACH: {
12493 MonoInst *args [16];
12495 /* Restore the original domain */
12496 dreg = alloc_ireg (cfg);
12497 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12498 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12503 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12509 case CEE_PREFIX1: {
12512 case CEE_ARGLIST: {
12513 /* somewhat similar to LDTOKEN */
12514 MonoInst *addr, *vtvar;
12515 CHECK_STACK_OVF (1);
12516 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12518 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12519 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12521 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12522 ins->type = STACK_VTYPE;
12523 ins->klass = mono_defaults.argumenthandle_class;
12533 MonoInst *cmp, *arg1, *arg2;
12541 * The following transforms:
12542 * CEE_CEQ into OP_CEQ
12543 * CEE_CGT into OP_CGT
12544 * CEE_CGT_UN into OP_CGT_UN
12545 * CEE_CLT into OP_CLT
12546 * CEE_CLT_UN into OP_CLT_UN
12548 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12550 MONO_INST_NEW (cfg, ins, cmp->opcode);
12551 cmp->sreg1 = arg1->dreg;
12552 cmp->sreg2 = arg2->dreg;
12553 type_from_op (cfg, cmp, arg1, arg2);
12555 add_widen_op (cfg, cmp, &arg1, &arg2);
12556 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12557 cmp->opcode = OP_LCOMPARE;
12558 else if (arg1->type == STACK_R4)
12559 cmp->opcode = OP_RCOMPARE;
12560 else if (arg1->type == STACK_R8)
12561 cmp->opcode = OP_FCOMPARE;
12563 cmp->opcode = OP_ICOMPARE;
12564 MONO_ADD_INS (bblock, cmp);
12565 ins->type = STACK_I4;
12566 ins->dreg = alloc_dreg (cfg, ins->type);
12567 type_from_op (cfg, ins, arg1, arg2);
12569 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12571 * The backends expect the fceq opcodes to do the
12574 ins->sreg1 = cmp->sreg1;
12575 ins->sreg2 = cmp->sreg2;
12578 MONO_ADD_INS (bblock, ins);
12584 MonoInst *argconst;
12585 MonoMethod *cil_method;
12587 CHECK_STACK_OVF (1);
12589 n = read32 (ip + 2);
12590 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12591 if (!cmethod || mono_loader_get_last_error ())
12593 mono_class_init (cmethod->klass);
12595 mono_save_token_info (cfg, image, n, cmethod);
12597 context_used = mini_method_check_context_used (cfg, cmethod);
12599 cil_method = cmethod;
12600 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12601 METHOD_ACCESS_FAILURE (method, cil_method);
12603 if (mono_security_cas_enabled ()) {
12604 if (check_linkdemand (cfg, method, cmethod))
12605 INLINE_FAILURE ("linkdemand");
12606 CHECK_CFG_EXCEPTION;
12607 } else if (mono_security_core_clr_enabled ()) {
12608 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12612 * Optimize the common case of ldftn+delegate creation
12614 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12615 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12616 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12617 MonoInst *target_ins, *handle_ins;
12618 MonoMethod *invoke;
12619 int invoke_context_used;
12621 invoke = mono_get_delegate_invoke (ctor_method->klass);
12622 if (!invoke || !mono_method_signature (invoke))
12625 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12627 target_ins = sp [-1];
12629 if (mono_security_core_clr_enabled ())
12630 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12632 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12633 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12634 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12635 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12636 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12640 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12641 /* FIXME: SGEN support */
12642 if (invoke_context_used == 0) {
12644 if (cfg->verbose_level > 3)
12645 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12646 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12649 CHECK_CFG_EXCEPTION;
12660 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12661 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12665 inline_costs += 10 * num_calls++;
12668 case CEE_LDVIRTFTN: {
12669 MonoInst *args [2];
12673 n = read32 (ip + 2);
12674 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12675 if (!cmethod || mono_loader_get_last_error ())
12677 mono_class_init (cmethod->klass);
12679 context_used = mini_method_check_context_used (cfg, cmethod);
12681 if (mono_security_cas_enabled ()) {
12682 if (check_linkdemand (cfg, method, cmethod))
12683 INLINE_FAILURE ("linkdemand");
12684 CHECK_CFG_EXCEPTION;
12685 } else if (mono_security_core_clr_enabled ()) {
12686 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12690 * Optimize the common case of ldvirtftn+delegate creation
12692 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12693 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12694 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12695 MonoInst *target_ins, *handle_ins;
12696 MonoMethod *invoke;
12697 int invoke_context_used;
12699 invoke = mono_get_delegate_invoke (ctor_method->klass);
12700 if (!invoke || !mono_method_signature (invoke))
12703 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12705 target_ins = sp [-1];
12707 if (mono_security_core_clr_enabled ())
12708 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12710 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12711 /* FIXME: SGEN support */
12712 if (invoke_context_used == 0) {
12714 if (cfg->verbose_level > 3)
12715 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12716 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12719 CHECK_CFG_EXCEPTION;
12733 args [1] = emit_get_rgctx_method (cfg, context_used,
12734 cmethod, MONO_RGCTX_INFO_METHOD);
12737 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12739 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12742 inline_costs += 10 * num_calls++;
12746 CHECK_STACK_OVF (1);
12748 n = read16 (ip + 2);
12750 EMIT_NEW_ARGLOAD (cfg, ins, n);
12755 CHECK_STACK_OVF (1);
12757 n = read16 (ip + 2);
12759 NEW_ARGLOADA (cfg, ins, n);
12760 MONO_ADD_INS (cfg->cbb, ins);
12768 n = read16 (ip + 2);
12770 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12772 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12776 CHECK_STACK_OVF (1);
12778 n = read16 (ip + 2);
12780 EMIT_NEW_LOCLOAD (cfg, ins, n);
12785 unsigned char *tmp_ip;
12786 CHECK_STACK_OVF (1);
12788 n = read16 (ip + 2);
12791 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12797 EMIT_NEW_LOCLOADA (cfg, ins, n);
12806 n = read16 (ip + 2);
12808 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12810 emit_stloc_ir (cfg, sp, header, n);
12817 if (sp != stack_start)
12819 if (cfg->method != method)
12821 * Inlining this into a loop in a parent could lead to
12822 * stack overflows which is different behavior than the
12823 * non-inlined case, thus disable inlining in this case.
12825 INLINE_FAILURE("localloc");
12827 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12828 ins->dreg = alloc_preg (cfg);
12829 ins->sreg1 = sp [0]->dreg;
12830 ins->type = STACK_PTR;
12831 MONO_ADD_INS (cfg->cbb, ins);
12833 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12835 ins->flags |= MONO_INST_INIT;
12840 case CEE_ENDFILTER: {
12841 MonoExceptionClause *clause, *nearest;
12846 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12848 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12849 ins->sreg1 = (*sp)->dreg;
12850 MONO_ADD_INS (bblock, ins);
12851 start_new_bblock = 1;
12855 for (cc = 0; cc < header->num_clauses; ++cc) {
12856 clause = &header->clauses [cc];
12857 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12858 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12859 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12862 g_assert (nearest);
12863 if ((ip - header->code) != nearest->handler_offset)
12868 case CEE_UNALIGNED_:
12869 ins_flag |= MONO_INST_UNALIGNED;
12870 /* FIXME: record alignment? we can assume 1 for now */
12874 case CEE_VOLATILE_:
12875 ins_flag |= MONO_INST_VOLATILE;
12879 ins_flag |= MONO_INST_TAILCALL;
12880 cfg->flags |= MONO_CFG_HAS_TAIL;
12881 /* Can't inline tail calls at this time */
12882 inline_costs += 100000;
12889 token = read32 (ip + 2);
12890 klass = mini_get_class (method, token, generic_context);
12891 CHECK_TYPELOAD (klass);
12892 if (generic_class_is_reference_type (cfg, klass))
12893 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12895 mini_emit_initobj (cfg, *sp, NULL, klass);
12899 case CEE_CONSTRAINED_:
12901 token = read32 (ip + 2);
12902 constrained_class = mini_get_class (method, token, generic_context);
12903 CHECK_TYPELOAD (constrained_class);
12907 case CEE_INITBLK: {
12908 MonoInst *iargs [3];
12912 /* Skip optimized paths for volatile operations. */
12913 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12914 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12915 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12916 /* emit_memset only works when val == 0 */
12917 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12920 iargs [0] = sp [0];
12921 iargs [1] = sp [1];
12922 iargs [2] = sp [2];
12923 if (ip [1] == CEE_CPBLK) {
12925 * FIXME: It's unclear whether we should be emitting both the acquire
12926 * and release barriers for cpblk. It is technically both a load and
12927 * store operation, so it seems like that's the sensible thing to do.
12929 * FIXME: We emit full barriers on both sides of the operation for
12930 * simplicity. We should have a separate atomic memcpy method instead.
12932 MonoMethod *memcpy_method = get_memcpy_method ();
12934 if (ins_flag & MONO_INST_VOLATILE)
12935 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12937 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12938 call->flags |= ins_flag;
12940 if (ins_flag & MONO_INST_VOLATILE)
12941 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12943 MonoMethod *memset_method = get_memset_method ();
12944 if (ins_flag & MONO_INST_VOLATILE) {
12945 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12946 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12948 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12949 call->flags |= ins_flag;
12960 ins_flag |= MONO_INST_NOTYPECHECK;
12962 ins_flag |= MONO_INST_NORANGECHECK;
12963 /* we ignore the no-nullcheck for now since we
12964 * really do it explicitly only when doing callvirt->call
12968 case CEE_RETHROW: {
12970 int handler_offset = -1;
12972 for (i = 0; i < header->num_clauses; ++i) {
12973 MonoExceptionClause *clause = &header->clauses [i];
12974 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12975 handler_offset = clause->handler_offset;
12980 bblock->flags |= BB_EXCEPTION_UNSAFE;
12982 if (handler_offset == -1)
12985 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12986 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12987 ins->sreg1 = load->dreg;
12988 MONO_ADD_INS (bblock, ins);
12990 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12991 MONO_ADD_INS (bblock, ins);
12994 link_bblock (cfg, bblock, end_bblock);
12995 start_new_bblock = 1;
13003 CHECK_STACK_OVF (1);
13005 token = read32 (ip + 2);
13006 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13007 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13010 val = mono_type_size (type, &ialign);
13012 MonoClass *klass = mini_get_class (method, token, generic_context);
13013 CHECK_TYPELOAD (klass);
13015 val = mono_type_size (&klass->byval_arg, &ialign);
13017 if (mini_is_gsharedvt_klass (cfg, klass))
13018 GSHAREDVT_FAILURE (*ip);
13020 EMIT_NEW_ICONST (cfg, ins, val);
13025 case CEE_REFANYTYPE: {
13026 MonoInst *src_var, *src;
13028 GSHAREDVT_FAILURE (*ip);
13034 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13036 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13037 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13043 case CEE_READONLY_:
13056 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13066 g_warning ("opcode 0x%02x not handled", *ip);
13070 if (start_new_bblock != 1)
13073 bblock->cil_length = ip - bblock->cil_code;
13074 if (bblock->next_bb) {
13075 /* This could already be set because of inlining, #693905 */
13076 MonoBasicBlock *bb = bblock;
13078 while (bb->next_bb)
13080 bb->next_bb = end_bblock;
13082 bblock->next_bb = end_bblock;
13085 if (cfg->method == method && cfg->domainvar) {
13087 MonoInst *get_domain;
13089 cfg->cbb = init_localsbb;
13091 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13092 MONO_ADD_INS (cfg->cbb, get_domain);
13094 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13096 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13097 MONO_ADD_INS (cfg->cbb, store);
13100 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13101 if (cfg->compile_aot)
13102 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13103 mono_get_got_var (cfg);
13106 if (cfg->method == method && cfg->got_var)
13107 mono_emit_load_got_addr (cfg);
13109 if (init_localsbb) {
13110 cfg->cbb = init_localsbb;
13112 for (i = 0; i < header->num_locals; ++i) {
13113 emit_init_local (cfg, i, header->locals [i], init_locals);
13117 if (cfg->init_ref_vars && cfg->method == method) {
13118 /* Emit initialization for ref vars */
13119 // FIXME: Avoid duplication initialization for IL locals.
13120 for (i = 0; i < cfg->num_varinfo; ++i) {
13121 MonoInst *ins = cfg->varinfo [i];
13123 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13124 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13128 if (cfg->lmf_var && cfg->method == method) {
13129 cfg->cbb = init_localsbb;
13130 emit_push_lmf (cfg);
13133 cfg->cbb = init_localsbb;
13134 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13137 MonoBasicBlock *bb;
13140 * Make seq points at backward branch targets interruptable.
13142 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13143 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13144 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13147 /* Add a sequence point for method entry/exit events */
13148 if (seq_points && cfg->gen_sdb_seq_points) {
13149 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13150 MONO_ADD_INS (init_localsbb, ins);
13151 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13152 MONO_ADD_INS (cfg->bb_exit, ins);
13156 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13157 * the code they refer to was dead (#11880).
13159 if (sym_seq_points) {
13160 for (i = 0; i < header->code_size; ++i) {
13161 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13164 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13165 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13172 if (cfg->method == method) {
13173 MonoBasicBlock *bb;
13174 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13175 bb->region = mono_find_block_region (cfg, bb->real_offset);
13177 mono_create_spvar_for_region (cfg, bb->region);
13178 if (cfg->verbose_level > 2)
13179 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13183 if (inline_costs < 0) {
13186 /* Method is too large */
13187 mname = mono_method_full_name (method, TRUE);
13188 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13189 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13193 if ((cfg->verbose_level > 2) && (cfg->method == method))
13194 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13199 g_assert (!mono_error_ok (&cfg->error));
13203 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13207 set_exception_type_from_invalid_il (cfg, method, ip);
13211 g_slist_free (class_inits);
13212 mono_basic_block_free (original_bb);
13213 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13214 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13215 if (cfg->exception_type)
13218 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STOREx_MEMBASE_REG opcode to its _IMM variant, so a store of a
 * known-constant value can embed the constant directly instead of going
 * through a register.
 */
13222 store_membase_reg_to_store_membase_imm (int opcode)
13225 case OP_STORE_MEMBASE_REG:
13226 return OP_STORE_MEMBASE_IMM;
13227 case OP_STOREI1_MEMBASE_REG:
13228 return OP_STOREI1_MEMBASE_IMM;
13229 case OP_STOREI2_MEMBASE_REG:
13230 return OP_STOREI2_MEMBASE_IMM;
13231 case OP_STOREI4_MEMBASE_REG:
13232 return OP_STOREI4_MEMBASE_IMM;
13233 case OP_STOREI8_MEMBASE_REG:
13234 return OP_STOREI8_MEMBASE_IMM;
/* Callers must only pass store opcodes which have an immediate form */
13236 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-source IR opcode to the variant taking an immediate second
 * operand (OP_IADD -> OP_IADD_IMM etc.), used when one source is a constant.
 * NOTE(review): the case labels for most entries are not visible in this
 * view; the mapping below is inferred from the returned opcodes.
 */
13243 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU and shift opcodes */
13247 return OP_IADD_IMM;
13249 return OP_ISUB_IMM;
13251 return OP_IDIV_IMM;
13253 return OP_IDIV_UN_IMM;
13255 return OP_IREM_IMM;
13257 return OP_IREM_UN_IMM;
13259 return OP_IMUL_IMM;
13261 return OP_IAND_IMM;
13265 return OP_IXOR_IMM;
13267 return OP_ISHL_IMM;
13269 return OP_ISHR_IMM;
13271 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU and shift opcodes */
13274 return OP_LADD_IMM;
13276 return OP_LSUB_IMM;
13278 return OP_LAND_IMM;
13282 return OP_LXOR_IMM;
13284 return OP_LSHL_IMM;
13286 return OP_LSHR_IMM;
13288 return OP_LSHR_UN_IMM;
/* Long remainder has an imm form only on 64 bit hosts */
13289 #if SIZEOF_REGISTER == 8
13291 return OP_LREM_IMM;
/* Compares */
13295 return OP_COMPARE_IMM;
13297 return OP_ICOMPARE_IMM;
13299 return OP_LCOMPARE_IMM;
/* Stores of a constant value */
13301 case OP_STORE_MEMBASE_REG:
13302 return OP_STORE_MEMBASE_IMM;
13303 case OP_STOREI1_MEMBASE_REG:
13304 return OP_STOREI1_MEMBASE_IMM;
13305 case OP_STOREI2_MEMBASE_REG:
13306 return OP_STOREI2_MEMBASE_IMM;
13307 case OP_STOREI4_MEMBASE_REG:
13308 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific opcodes */
13310 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13312 return OP_X86_PUSH_IMM;
13313 case OP_X86_COMPARE_MEMBASE_REG:
13314 return OP_X86_COMPARE_MEMBASE_IMM;
13316 #if defined(TARGET_AMD64)
13317 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13318 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call to a constant address drops the address register */
13320 case OP_VOIDCALL_REG:
13321 return OP_VOIDCALL;
13329 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL opcode to the corresponding typed
 * OP_LOADx_MEMBASE IR load opcode.
 */
13336 ldind_to_load_membase (int opcode)
13340 return OP_LOADI1_MEMBASE;
13342 return OP_LOADU1_MEMBASE;
13344 return OP_LOADI2_MEMBASE;
13346 return OP_LOADU2_MEMBASE;
13348 return OP_LOADI4_MEMBASE;
13350 return OP_LOADU4_MEMBASE;
13352 return OP_LOAD_MEMBASE;
/* Object references load with the native-word sized load as well */
13353 case CEE_LDIND_REF:
13354 return OP_LOAD_MEMBASE;
13356 return OP_LOADI8_MEMBASE;
13358 return OP_LOADR4_MEMBASE;
13360 return OP_LOADR8_MEMBASE;
/* Every CEE_LDIND_* opcode must be covered above */
13362 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL opcode to the corresponding typed
 * OP_STOREx_MEMBASE_REG IR store opcode.
 */
13369 stind_to_store_membase (int opcode)
13373 return OP_STOREI1_MEMBASE_REG;
13375 return OP_STOREI2_MEMBASE_REG;
13377 return OP_STOREI4_MEMBASE_REG;
/* Object references store with the native-word sized store */
13379 case CEE_STIND_REF:
13380 return OP_STORE_MEMBASE_REG;
13382 return OP_STOREI8_MEMBASE_REG;
13384 return OP_STORER4_MEMBASE_REG;
13386 return OP_STORER8_MEMBASE_REG;
/* Every CEE_STIND_* opcode must be covered above */
13388 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOADx_MEMBASE (base register + displacement) opcode to the
 * OP_LOADx_MEM (absolute address) form, for architectures which support
 * loads from a constant address. Only implemented for x86/amd64.
 * NOTE(review): the non-x86 fallback return value is outside this view —
 * presumably a "no mapping" sentinel; confirm against the full source.
 */
13395 mono_load_membase_to_load_mem (int opcode)
13397 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13398 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13400 case OP_LOAD_MEMBASE:
13401 return OP_LOAD_MEM;
13402 case OP_LOADU1_MEMBASE:
13403 return OP_LOADU1_MEM;
13404 case OP_LOADU2_MEMBASE:
13405 return OP_LOADU2_MEM;
13406 case OP_LOADI4_MEMBASE:
13407 return OP_LOADI4_MEM;
13408 case OP_LOADU4_MEMBASE:
13409 return OP_LOADU4_MEM;
/* 64 bit loads from an absolute address only exist on 64 bit hosts */
13410 #if SIZEOF_REGISTER == 8
13411 case OP_LOADI8_MEMBASE:
13412 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU result is immediately stored back to memory via
 * STORE_OPCODE, map the ALU opcode to a read-modify-write x86/amd64
 * "op to membase" form so the store is folded into the operation.
 * NOTE(review): the case labels pairing each return are outside this
 * view; grouping comments below are inferred from the returned opcodes.
 */
13421 op_to_op_dest_membase (int store_opcode, int opcode)
13423 #if defined(TARGET_X86)
/* Only fold into full-word stores */
13424 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source ALU ops */
13429 return OP_X86_ADD_MEMBASE_REG;
13431 return OP_X86_SUB_MEMBASE_REG;
13433 return OP_X86_AND_MEMBASE_REG;
13435 return OP_X86_OR_MEMBASE_REG;
13437 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source ALU ops */
13440 return OP_X86_ADD_MEMBASE_IMM;
13443 return OP_X86_SUB_MEMBASE_IMM;
13446 return OP_X86_AND_MEMBASE_IMM;
13449 return OP_X86_OR_MEMBASE_IMM;
13452 return OP_X86_XOR_MEMBASE_IMM;
13458 #if defined(TARGET_AMD64)
/* amd64 additionally folds into 8 byte stores */
13459 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_ opcodes */
13464 return OP_X86_ADD_MEMBASE_REG;
13466 return OP_X86_SUB_MEMBASE_REG;
13468 return OP_X86_AND_MEMBASE_REG;
13470 return OP_X86_OR_MEMBASE_REG;
13472 return OP_X86_XOR_MEMBASE_REG;
13474 return OP_X86_ADD_MEMBASE_IMM;
13476 return OP_X86_SUB_MEMBASE_IMM;
13478 return OP_X86_AND_MEMBASE_IMM;
13480 return OP_X86_OR_MEMBASE_IMM;
13482 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops use the AMD64_ opcodes */
13484 return OP_AMD64_ADD_MEMBASE_REG;
13486 return OP_AMD64_SUB_MEMBASE_REG;
13488 return OP_AMD64_AND_MEMBASE_REG;
13490 return OP_AMD64_OR_MEMBASE_REG;
13492 return OP_AMD64_XOR_MEMBASE_REG;
13495 return OP_AMD64_ADD_MEMBASE_IMM;
13498 return OP_AMD64_SUB_MEMBASE_IMM;
13501 return OP_AMD64_AND_MEMBASE_IMM;
13504 return OP_AMD64_OR_MEMBASE_IMM;
13507 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode whose single-byte result is stored via
 * STORE_OPCODE into an x86/amd64 SETcc-to-memory instruction, avoiding
 * the intermediate register.
 */
13517 op_to_op_store_membase (int store_opcode, int opcode)
13519 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc only writes one byte, so the store must be a 1 byte store */
13522 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13523 return OP_X86_SETEQ_MEMBASE;
13525 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13526 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When the first source of OPCODE is produced by a LOAD_OPCODE memory
 * load, map OPCODE to an x86/amd64 form which reads that source directly
 * from memory, so the separate load can be eliminated.
 */
13534 op_to_op_src1_membase (int load_opcode, int opcode)
13537 /* FIXME: This has sign extension issues */
13539 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13540 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only fold full-word sized loads */
13543 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13548 return OP_X86_PUSH_MEMBASE;
13549 case OP_COMPARE_IMM:
13550 case OP_ICOMPARE_IMM:
13551 return OP_X86_COMPARE_MEMBASE_IMM;
13554 return OP_X86_COMPARE_MEMBASE_REG;
13558 #ifdef TARGET_AMD64
13559 /* FIXME: This has sign extension issues */
13561 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13562 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) pointers are 4 bytes, so 8 byte loads cannot be folded into a push */
13567 #ifdef __mono_ilp32__
13568 if (load_opcode == OP_LOADI8_MEMBASE)
13570 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13572 return OP_X86_PUSH_MEMBASE;
13574 /* FIXME: This only works for 32 bit immediates
13575 case OP_COMPARE_IMM:
13576 case OP_LCOMPARE_IMM:
13577 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13578 return OP_AMD64_COMPARE_MEMBASE_IMM;
13580 case OP_ICOMPARE_IMM:
13581 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13582 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13586 #ifdef __mono_ilp32__
13587 if (load_opcode == OP_LOAD_MEMBASE)
13588 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13589 if (load_opcode == OP_LOADI8_MEMBASE)
13591 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13593 return OP_AMD64_COMPARE_MEMBASE_REG;
13596 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13597 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   When the second source of OPCODE is produced by a LOAD_OPCODE memory
 * load, map OPCODE to an x86/amd64 "reg, membase" form which reads that
 * operand directly from memory, eliminating the separate load.
 */
13606 op_to_op_src2_membase (int load_opcode, int opcode)
/* Only fold full-word sized loads */
13609 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13615 return OP_X86_COMPARE_REG_MEMBASE;
13617 return OP_X86_ADD_REG_MEMBASE;
13619 return OP_X86_SUB_REG_MEMBASE;
13621 return OP_X86_AND_REG_MEMBASE;
13623 return OP_X86_OR_REG_MEMBASE;
13625 return OP_X86_XOR_REG_MEMBASE;
13629 #ifdef TARGET_AMD64
/* Under ILP32 (x32) the native word is 4 bytes, so OP_LOAD_MEMBASE folds with the 32 bit ops */
13630 #ifdef __mono_ilp32__
13631 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13633 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit operations */
13637 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13639 return OP_X86_ADD_REG_MEMBASE;
13641 return OP_X86_SUB_REG_MEMBASE;
13643 return OP_X86_AND_REG_MEMBASE;
13645 return OP_X86_OR_REG_MEMBASE;
13647 return OP_X86_XOR_REG_MEMBASE;
13649 #ifdef __mono_ilp32__
13650 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13652 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit operations */
13657 return OP_AMD64_COMPARE_REG_MEMBASE;
13659 return OP_AMD64_ADD_REG_MEMBASE;
13661 return OP_AMD64_SUB_REG_MEMBASE;
13663 return OP_AMD64_AND_REG_MEMBASE;
13665 return OP_AMD64_OR_REG_MEMBASE;
13667 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes which
 * the current architecture emulates in software (long shifts on 32 bit
 * hosts, mul/div/rem under MONO_ARCH_EMULATE_*), since those must keep
 * their register form for the emulation call.
 * NOTE(review): the guarded case labels between the #if blocks are not
 * visible here — confirm the exact excluded opcode set in the full source.
 */
13676 mono_op_to_op_imm_noemul (int opcode)
13679 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13685 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13692 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can use the normal immediate conversion */
13697 return mono_op_to_op_imm (opcode);
13702 * mono_handle_global_vregs:
13704 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13708 mono_handle_global_vregs (MonoCompile *cfg)
13710 gint32 *vreg_to_bb;
13711 MonoBasicBlock *bb;
13714 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13716 #ifdef MONO_ARCH_SIMD_INTRINSICS
13717 if (cfg->uses_simd_intrinsics)
13718 mono_simd_simplify_indirection (cfg);
13721 /* Find local vregs used in more than one bb */
13722 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13723 MonoInst *ins = bb->code;
13724 int block_num = bb->block_num;
13726 if (cfg->verbose_level > 2)
13727 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13730 for (; ins; ins = ins->next) {
13731 const char *spec = INS_INFO (ins->opcode);
13732 int regtype = 0, regindex;
13735 if (G_UNLIKELY (cfg->verbose_level > 2))
13736 mono_print_ins (ins);
13738 g_assert (ins->opcode >= MONO_CEE_LAST);
13740 for (regindex = 0; regindex < 4; regindex ++) {
13743 if (regindex == 0) {
13744 regtype = spec [MONO_INST_DEST];
13745 if (regtype == ' ')
13748 } else if (regindex == 1) {
13749 regtype = spec [MONO_INST_SRC1];
13750 if (regtype == ' ')
13753 } else if (regindex == 2) {
13754 regtype = spec [MONO_INST_SRC2];
13755 if (regtype == ' ')
13758 } else if (regindex == 3) {
13759 regtype = spec [MONO_INST_SRC3];
13760 if (regtype == ' ')
13765 #if SIZEOF_REGISTER == 4
13766 /* In the LLVM case, the long opcodes are not decomposed */
13767 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13769 * Since some instructions reference the original long vreg,
13770 * and some reference the two component vregs, it is quite hard
13771 * to determine when it needs to be global. So be conservative.
13773 if (!get_vreg_to_inst (cfg, vreg)) {
13774 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13776 if (cfg->verbose_level > 2)
13777 printf ("LONG VREG R%d made global.\n", vreg);
13781 * Make the component vregs volatile since the optimizations can
13782 * get confused otherwise.
13784 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13785 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13789 g_assert (vreg != -1);
13791 prev_bb = vreg_to_bb [vreg];
13792 if (prev_bb == 0) {
13793 /* 0 is a valid block num */
13794 vreg_to_bb [vreg] = block_num + 1;
13795 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13796 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13799 if (!get_vreg_to_inst (cfg, vreg)) {
13800 if (G_UNLIKELY (cfg->verbose_level > 2))
13801 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13805 if (vreg_is_ref (cfg, vreg))
13806 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13808 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13811 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13814 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13817 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13820 g_assert_not_reached ();
13824 /* Flag as having been used in more than one bb */
13825 vreg_to_bb [vreg] = -1;
13831 /* If a variable is used in only one bblock, convert it into a local vreg */
13832 for (i = 0; i < cfg->num_varinfo; i++) {
13833 MonoInst *var = cfg->varinfo [i];
13834 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13836 switch (var->type) {
13842 #if SIZEOF_REGISTER == 8
13845 #if !defined(TARGET_X86)
13846 /* Enabling this screws up the fp stack on x86 */
13849 if (mono_arch_is_soft_float ())
13852 /* Arguments are implicitly global */
13853 /* Putting R4 vars into registers doesn't work currently */
13854 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13855 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13857 * Make that the variable's liveness interval doesn't contain a call, since
13858 * that would cause the lvreg to be spilled, making the whole optimization
13861 /* This is too slow for JIT compilation */
13863 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13865 int def_index, call_index, ins_index;
13866 gboolean spilled = FALSE;
13871 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13872 const char *spec = INS_INFO (ins->opcode);
13874 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13875 def_index = ins_index;
13877 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13878 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13879 if (call_index > def_index) {
13885 if (MONO_IS_CALL (ins))
13886 call_index = ins_index;
13896 if (G_UNLIKELY (cfg->verbose_level > 2))
13897 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13898 var->flags |= MONO_INST_IS_DEAD;
13899 cfg->vreg_to_inst [var->dreg] = NULL;
13906 * Compress the varinfo and vars tables so the liveness computation is faster and
13907 * takes up less space.
13910 for (i = 0; i < cfg->num_varinfo; ++i) {
13911 MonoInst *var = cfg->varinfo [i];
13912 if (pos < i && cfg->locals_start == i)
13913 cfg->locals_start = pos;
13914 if (!(var->flags & MONO_INST_IS_DEAD)) {
13916 cfg->varinfo [pos] = cfg->varinfo [i];
13917 cfg->varinfo [pos]->inst_c0 = pos;
13918 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13919 cfg->vars [pos].idx = pos;
13920 #if SIZEOF_REGISTER == 4
13921 if (cfg->varinfo [pos]->type == STACK_I8) {
13922 /* Modify the two component vars too */
13925 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13926 var1->inst_c0 = pos;
13927 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13928 var1->inst_c0 = pos;
13935 cfg->num_varinfo = pos;
13936 if (cfg->locals_start > cfg->num_varinfo)
13937 cfg->locals_start = cfg->num_varinfo;
13941 * mono_spill_global_vars:
13943 * Generate spill code for variables which are not allocated to registers,
13944 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13945 * code is generated which could be optimized by the local optimization passes.
13948 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13950 MonoBasicBlock *bb;
13952 int orig_next_vreg;
13953 guint32 *vreg_to_lvreg;
13955 guint32 i, lvregs_len;
13956 gboolean dest_has_lvreg = FALSE;
13957 guint32 stacktypes [128];
13958 MonoInst **live_range_start, **live_range_end;
13959 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13960 int *gsharedvt_vreg_to_idx = NULL;
13962 *need_local_opts = FALSE;
13964 memset (spec2, 0, sizeof (spec2));
13966 /* FIXME: Move this function to mini.c */
/* Map INS_INFO spec characters ('i'/'l'/'f'/'x') to stack types for alloc_dreg () */
13967 stacktypes ['i'] = STACK_PTR;
13968 stacktypes ['l'] = STACK_I8;
13969 stacktypes ['f'] = STACK_R8;
13970 #ifdef MONO_ARCH_SIMD_INTRINSICS
13971 stacktypes ['x'] = STACK_VTYPE;
13974 #if SIZEOF_REGISTER == 4
13975 /* Create MonoInsts for longs */
13976 for (i = 0; i < cfg->num_varinfo; i++) {
13977 MonoInst *ins = cfg->varinfo [i];
13979 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13980 switch (ins->type) {
13985 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13988 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32 bit component vregs (dreg + 1 / dreg + 2) regoffsets
 * pointing at the LS/MS words of the 64 bit stack slot. */
13990 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13992 tree->opcode = OP_REGOFFSET;
13993 tree->inst_basereg = ins->inst_basereg;
13994 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13996 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13998 tree->opcode = OP_REGOFFSET;
13999 tree->inst_basereg = ins->inst_basereg;
14000 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14010 if (cfg->compute_gc_maps) {
14011 /* registers need liveness info even for non-ref values */
14012 for (i = 0; i < cfg->num_varinfo; i++) {
14013 MonoInst *ins = cfg->varinfo [i];
14015 if (ins->opcode == OP_REGVAR)
14016 ins->flags |= MONO_INST_GC_TRACK;
/* Pre-compute gsharedvt slot mapping: locals get (slot idx + 1), args passed
 * by ref get -1, everything else stays 0 ("not gsharedvt"). */
14020 if (cfg->gsharedvt) {
14021 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14023 for (i = 0; i < cfg->num_varinfo; ++i) {
14024 MonoInst *ins = cfg->varinfo [i];
14027 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
14028 if (i >= cfg->locals_start) {
14030 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14031 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14032 ins->opcode = OP_GSHAREDVT_LOCAL;
14033 ins->inst_imm = idx;
14036 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14037 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14043 /* FIXME: widening and truncation */
14046 * As an optimization, when a variable allocated to the stack is first loaded into
14047 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14048 * the variable again.
14050 orig_next_vreg = cfg->next_vreg;
14051 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed 1024-entry scratch list of vregs that currently have a cached lvreg;
 * the bound is enforced with g_assert (lvregs_len < 1024) below. */
14052 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14056 * These arrays contain the first and last instructions accessing a given
14058 * Since we emit bblocks in the same order we process them here, and we
14059 * don't split live ranges, these will precisely describe the live range of
14060 * the variable, i.e. the instruction range where a valid value can be found
14061 * in the variable's location.
14062 * The live range is computed using the liveness info computed by the liveness pass.
14063 * We can't use vmv->range, since that is an abstract live range, and we need
14064 * one which is instruction precise.
14065 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14067 /* FIXME: Only do this if debugging info is requested */
14068 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
14069 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14070 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14071 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14073 /* Add spill loads/stores */
14074 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14077 if (cfg->verbose_level > 2)
14078 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock */
14080 /* Clear vreg_to_lvreg array */
14081 for (i = 0; i < lvregs_len; i++)
14082 vreg_to_lvreg [lvregs [i]] = 0;
14086 MONO_BB_FOR_EACH_INS (bb, ins) {
14087 const char *spec = INS_INFO (ins->opcode);
14088 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14089 gboolean store, no_lvreg;
14090 int sregs [MONO_MAX_SRC_REGS];
14092 if (G_UNLIKELY (cfg->verbose_level > 2))
14093 mono_print_ins (ins);
14095 if (ins->opcode == OP_NOP)
14099 * We handle LDADDR here as well, since it can only be decomposed
14100 * when variable addresses are known.
14102 if (ins->opcode == OP_LDADDR) {
14103 MonoInst *var = ins->inst_p0;
14105 if (var->opcode == OP_VTARG_ADDR) {
14106 /* Happens on SPARC/S390 where vtypes are passed by reference */
14107 MonoInst *vtaddr = var->inst_left;
14108 if (vtaddr->opcode == OP_REGVAR) {
14109 ins->opcode = OP_MOVE;
14110 ins->sreg1 = vtaddr->dreg;
14112 else if (var->inst_left->opcode == OP_REGOFFSET) {
14113 ins->opcode = OP_LOAD_MEMBASE;
14114 ins->inst_basereg = vtaddr->inst_basereg;
14115 ins->inst_offset = vtaddr->inst_offset;
14118 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14119 /* gsharedvt arg passed by ref */
14120 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14122 ins->opcode = OP_LOAD_MEMBASE;
14123 ins->inst_basereg = var->inst_basereg;
14124 ins->inst_offset = var->inst_offset;
14125 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14126 MonoInst *load, *load2, *load3;
14127 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14128 int reg1, reg2, reg3;
14129 MonoInst *info_var = cfg->gsharedvt_info_var;
14130 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14134 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14137 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14139 g_assert (info_var);
14140 g_assert (locals_var);
14142 /* Mark the instruction used to compute the locals var as used */
14143 cfg->gsharedvt_locals_var_ins = NULL;
14145 /* Load the offset */
14146 if (info_var->opcode == OP_REGOFFSET) {
14147 reg1 = alloc_ireg (cfg);
14148 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14149 } else if (info_var->opcode == OP_REGVAR) {
14151 reg1 = info_var->dreg;
14153 g_assert_not_reached ();
14155 reg2 = alloc_ireg (cfg);
14156 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14157 /* Load the locals area address */
14158 reg3 = alloc_ireg (cfg);
14159 if (locals_var->opcode == OP_REGOFFSET) {
14160 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14161 } else if (locals_var->opcode == OP_REGVAR) {
14162 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14164 g_assert_not_reached ();
14166 /* Compute the address */
14167 ins->opcode = OP_PADD;
/* Insert load3, load2, load before ins, in that (reverse) order */
14171 mono_bblock_insert_before_ins (bb, ins, load3);
14172 mono_bblock_insert_before_ins (bb, load3, load2);
14174 mono_bblock_insert_before_ins (bb, load2, load);
14176 g_assert (var->opcode == OP_REGOFFSET);
14178 ins->opcode = OP_ADD_IMM;
14179 ins->sreg1 = var->inst_basereg;
14180 ins->inst_imm = var->inst_offset;
14183 *need_local_opts = TRUE;
14184 spec = INS_INFO (ins->opcode);
/* By this point only low-level (post-CEE) opcodes may remain */
14187 if (ins->opcode < MONO_CEE_LAST) {
14188 mono_print_ins (ins);
14189 g_assert_not_reached ();
14193 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14197 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 so the stored value is handled as a source; spec2
 * presents the base register as SRC2 and no destination. */
14198 tmp_reg = ins->dreg;
14199 ins->dreg = ins->sreg2;
14200 ins->sreg2 = tmp_reg;
14203 spec2 [MONO_INST_DEST] = ' ';
14204 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14205 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14206 spec2 [MONO_INST_SRC3] = ' ';
14208 } else if (MONO_IS_STORE_MEMINDEX (ins))
14209 g_assert_not_reached ();
14214 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14215 printf ("\t %.3s %d", spec, ins->dreg);
14216 num_sregs = mono_inst_get_src_registers (ins, sregs);
14217 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14218 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
14225 regtype = spec [MONO_INST_DEST];
14226 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14229 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14230 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14231 MonoInst *store_ins;
14233 MonoInst *def_ins = ins;
14234 int dreg = ins->dreg; /* The original vreg */
14236 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14238 if (var->opcode == OP_REGVAR) {
14239 ins->dreg = var->dreg;
14240 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14242 * Instead of emitting a load+store, use a _membase opcode.
14244 g_assert (var->opcode == OP_REGOFFSET);
14245 if (ins->opcode == OP_MOVE) {
14249 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14250 ins->inst_basereg = var->inst_basereg;
14251 ins->inst_offset = var->inst_offset;
14254 spec = INS_INFO (ins->opcode);
14258 g_assert (var->opcode == OP_REGOFFSET);
14260 prev_dreg = ins->dreg;
14262 /* Invalidate any previous lvreg for this vreg */
14263 vreg_to_lvreg [ins->dreg] = 0;
14267 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Under soft float, R8 values live in integer registers */
14269 store_opcode = OP_STOREI8_MEMBASE_REG;
14272 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14274 #if SIZEOF_REGISTER != 8
14275 if (regtype == 'l') {
/* Spill the two 32 bit halves with separate I4 stores */
14276 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14277 mono_bblock_insert_after_ins (bb, ins, store_ins);
14278 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14279 mono_bblock_insert_after_ins (bb, ins, store_ins);
14280 def_ins = store_ins;
14285 g_assert (store_opcode != OP_STOREV_MEMBASE);
14287 /* Try to fuse the store into the instruction itself */
14288 /* FIXME: Add more instructions */
14289 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14290 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14291 ins->inst_imm = ins->inst_c0;
14292 ins->inst_destbasereg = var->inst_basereg;
14293 ins->inst_offset = var->inst_offset;
14294 spec = INS_INFO (ins->opcode);
14295 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14296 ins->opcode = store_opcode;
14297 ins->inst_destbasereg = var->inst_basereg;
14298 ins->inst_offset = var->inst_offset;
/* The instruction became a store: re-apply the dreg/sreg2 swap + spec2
 * normalization done for store opcodes above. */
14302 tmp_reg = ins->dreg;
14303 ins->dreg = ins->sreg2;
14304 ins->sreg2 = tmp_reg;
14307 spec2 [MONO_INST_DEST] = ' ';
14308 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14309 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14310 spec2 [MONO_INST_SRC3] = ' ';
14312 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14313 // FIXME: The backends expect the base reg to be in inst_basereg
14314 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14316 ins->inst_basereg = var->inst_basereg;
14317 ins->inst_offset = var->inst_offset;
14318 spec = INS_INFO (ins->opcode);
14320 /* printf ("INS: "); mono_print_ins (ins); */
14321 /* Create a store instruction */
14322 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14324 /* Insert it after the instruction */
14325 mono_bblock_insert_after_ins (bb, ins, store_ins);
14327 def_ins = store_ins;
14330 * We can't assign ins->dreg to var->dreg here, since the
14331 * sregs could use it. So set a flag, and do it after
14334 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14335 dest_has_lvreg = TRUE;
14340 if (def_ins && !live_range_start [dreg]) {
14341 live_range_start [dreg] = def_ins;
14342 live_range_start_bb [dreg] = bb;
14345 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14348 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14349 tmp->inst_c1 = dreg;
14350 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
14357 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* Iterates SRC1..SRC3; unused slots have regtype ' ' / sreg -1 */
14358 for (srcindex = 0; srcindex < 3; ++srcindex) {
14359 regtype = spec [MONO_INST_SRC1 + srcindex];
14360 sreg = sregs [srcindex];
14362 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14363 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14364 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14365 MonoInst *use_ins = ins;
14366 MonoInst *load_ins;
14367 guint32 load_opcode;
14369 if (var->opcode == OP_REGVAR) {
14370 sregs [srcindex] = var->dreg;
14371 //mono_inst_set_src_registers (ins, sregs);
14372 live_range_end [sreg] = use_ins;
14373 live_range_end_bb [sreg] = bb;
14375 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14378 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14379 /* var->dreg is a hreg */
14380 tmp->inst_c1 = sreg;
14381 mono_bblock_insert_after_ins (bb, ins, tmp);
14387 g_assert (var->opcode == OP_REGOFFSET);
14389 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14391 g_assert (load_opcode != OP_LOADV_MEMBASE);
14393 if (vreg_to_lvreg [sreg]) {
14394 g_assert (vreg_to_lvreg [sreg] != -1);
14396 /* The variable is already loaded to an lvreg */
14397 if (G_UNLIKELY (cfg->verbose_level > 2))
14398 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14399 sregs [srcindex] = vreg_to_lvreg [sreg];
14400 //mono_inst_set_src_registers (ins, sregs);
14404 /* Try to fuse the load into the instruction */
14405 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14406 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14407 sregs [0] = var->inst_basereg;
14408 //mono_inst_set_src_registers (ins, sregs);
14409 ins->inst_offset = var->inst_offset;
14410 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14411 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14412 sregs [1] = var->inst_basereg;
14413 //mono_inst_set_src_registers (ins, sregs);
14414 ins->inst_offset = var->inst_offset;
14416 if (MONO_IS_REAL_MOVE (ins)) {
14417 ins->opcode = OP_NOP;
14420 //printf ("%d ", srcindex); mono_print_ins (ins);
14422 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14424 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14425 if (var->dreg == prev_dreg) {
14427 * sreg refers to the value loaded by the load
14428 * emitted below, but we need to use ins->dreg
14429 * since it refers to the store emitted earlier.
14433 g_assert (sreg != -1);
14434 vreg_to_lvreg [var->dreg] = sreg;
14435 g_assert (lvregs_len < 1024);
14436 lvregs [lvregs_len ++] = var->dreg;
14440 sregs [srcindex] = sreg;
14441 //mono_inst_set_src_registers (ins, sregs);
14443 #if SIZEOF_REGISTER != 8
14444 if (regtype == 'l') {
/* Reload the two 32 bit halves (MS word first so the final
 * load_ins is the LS load preceding ins) */
14445 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14446 mono_bblock_insert_before_ins (bb, ins, load_ins);
14447 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14448 mono_bblock_insert_before_ins (bb, ins, load_ins);
14449 use_ins = load_ins;
14454 #if SIZEOF_REGISTER == 4
14455 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14457 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14458 mono_bblock_insert_before_ins (bb, ins, load_ins);
14459 use_ins = load_ins;
14463 if (var->dreg < orig_next_vreg) {
14464 live_range_end [var->dreg] = use_ins;
14465 live_range_end_bb [var->dreg] = bb;
14468 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14471 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14472 tmp->inst_c1 = var->dreg;
14473 mono_bblock_insert_after_ins (bb, ins, tmp);
14477 mono_inst_set_src_registers (ins, sregs);
/* Now that the sregs have been processed it is safe to cache the
 * dreg's lvreg (deferred via dest_has_lvreg above). */
14479 if (dest_has_lvreg) {
14480 g_assert (ins->dreg != -1);
14481 vreg_to_lvreg [prev_dreg] = ins->dreg;
14482 g_assert (lvregs_len < 1024);
14483 lvregs [lvregs_len ++] = prev_dreg;
14484 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above */
14488 tmp_reg = ins->dreg;
14489 ins->dreg = ins->sreg2;
14490 ins->sreg2 = tmp_reg;
14493 if (MONO_IS_CALL (ins)) {
/* Cached lvregs cannot survive a call */
14494 /* Clear vreg_to_lvreg array */
14495 for (i = 0; i < lvregs_len; i++)
14496 vreg_to_lvreg [lvregs [i]] = 0;
14498 } else if (ins->opcode == OP_NOP) {
14500 MONO_INST_NULLIFY_SREGS (ins);
14503 if (cfg->verbose_level > 2)
14504 mono_print_ins_index (1, ins);
14507 /* Extend the live range based on the liveness info */
14508 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14509 for (i = 0; i < cfg->num_varinfo; i ++) {
14510 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14512 if (vreg_is_volatile (cfg, vi->vreg))
14513 /* The liveness info is incomplete */
14516 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14517 /* Live from at least the first ins of this bb */
14518 live_range_start [vi->vreg] = bb->code;
14519 live_range_start_bb [vi->vreg] = bb;
14522 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14523 /* Live at least until the last ins of this bb */
14524 live_range_end [vi->vreg] = bb->last_ins;
14525 live_range_end_bb [vi->vreg] = bb;
14531 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14533 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14534 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14536 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14537 for (i = 0; i < cfg->num_varinfo; ++i) {
14538 int vreg = MONO_VARINFO (cfg, i)->vreg;
14541 if (live_range_start [vreg]) {
14542 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14544 ins->inst_c1 = vreg;
14545 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14547 if (live_range_end [vreg]) {
14548 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14550 ins->inst_c1 = vreg;
14551 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14552 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14554 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* gsharedvt_locals_var_ins was reset to NULL above whenever the locals var was
 * actually referenced; if still set here it was never used. */
14560 if (cfg->gsharedvt_locals_var_ins) {
14561 /* Nullify if unused */
14562 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14563 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14566 g_free (live_range_start);
14567 g_free (live_range_end);
14568 g_free (live_range_start_bb);
14569 g_free (live_range_end_bb);
14574 * - use 'iadd' instead of 'int_add'
14575 * - handling ovf opcodes: decompose in method_to_ir.
14576 * - unify iregs/fregs
14577 * -> partly done, the missing parts are:
14578 * - a more complete unification would involve unifying the hregs as well, so
14579 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14580 * would no longer map to the machine hregs, so the code generators would need to
14581 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14582 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14583 * fp/non-fp branches speeds it up by about 15%.
14584 * - use sext/zext opcodes instead of shifts
14586 * - get rid of TEMPLOADs if possible and use vregs instead
14587 * - clean up usage of OP_P/OP_ opcodes
14588 * - cleanup usage of DUMMY_USE
14589 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14591 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14592 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14593 * - make sure handle_stack_args () is called before the branch is emitted
14594 * - when the new IR is done, get rid of all unused stuff
14595 * - COMPARE/BEQ as separate instructions or unify them ?
14596 * - keeping them separate allows specialized compare instructions like
14597 * compare_imm, compare_membase
14598 * - most back ends unify fp compare+branch, fp compare+ceq
14599 * - integrate mono_save_args into inline_method
14600 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14601 * - handle long shift opts on 32 bit platforms somehow: they require
14602 * 3 sregs (2 for arg1 and 1 for arg2)
14603 * - make byref a 'normal' type.
14604 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14605 * variable if needed.
14606 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14607 * like inline_method.
14608 * - remove inlining restrictions
14609 * - fix LNEG and enable cfold of INEG
14610 * - generalize x86 optimizations like ldelema as a peephole optimization
14611 * - add store_mem_imm for amd64
14612 * - optimize the loading of the interruption flag in the managed->native wrappers
14613 * - avoid special handling of OP_NOP in passes
14614 * - move code inserting instructions into one function/macro.
14615 * - try a coalescing phase after liveness analysis
14616 * - add float -> vreg conversion + local optimizations on !x86
14617 * - figure out how to handle decomposed branches during optimizations, ie.
14618 * compare+branch, op_jump_table+op_br etc.
14619 * - promote RuntimeXHandles to vregs
14620 * - vtype cleanups:
14621 * - add a NEW_VARLOADA_VREG macro
14622 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14623 * accessing vtype fields.
14624 * - get rid of I8CONST on 64 bit platforms
14625 * - dealing with the increase in code size due to branches created during opcode
14627 * - use extended basic blocks
14628 * - all parts of the JIT
14629 * - handle_global_vregs () && local regalloc
14630 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14631 * - sources of increase in code size:
14634 * - isinst and castclass
14635 * - lvregs not allocated to global registers even if used multiple times
14636 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14638 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14639 * - add all micro optimizations from the old JIT
14640 * - put tree optimizations into the deadce pass
14641 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14642 * specific function.
14643 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14644 * fcompare + branchCC.
14645 * - create a helper function for allocating a stack slot, taking into account
14646 * MONO_CFG_HAS_SPILLUP.
14648 * - merge the ia64 switch changes.
14649 * - optimize mono_regstate2_alloc_int/float.
14650 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14651 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14652 * parts of the tree could be separated by other instructions, killing the tree
14653 * arguments, or stores killing loads etc. Also, should we fold loads into other
14654 * instructions if the result of the load is used multiple times ?
14655 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14656 * - LAST MERGE: 108395.
14657 * - when returning vtypes in registers, generate IR and append it to the end of the
14658 * last bb instead of doing it in the epilog.
14659 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14667 - When to decompose opcodes:
14668 - earlier: this makes some optimizations hard to implement, since the low level IR
14669 no longer contains the necessary information. But it is easier to do.
14670 - later: harder to implement, enables more optimizations.
14671 - Branches inside bblocks:
14672 - created when decomposing complex opcodes.
14673 - branches to another bblock: harmless, but not tracked by the branch
14674 optimizations, so need to branch to a label at the start of the bblock.
14675 - branches to inside the same bblock: very problematic, trips up the local
14676 reg allocator. Can be fixed by splitting the current bblock, but that is a
14677 complex operation, since some local vregs can become global vregs etc.
14678 - Local/global vregs:
14679 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14680 local register allocator.
14681 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14682 structure, created by mono_create_var (). Assigned to hregs or the stack by
14683 the global register allocator.
14684 - When to do optimizations like alu->alu_imm:
14685 - earlier -> saves work later on since the IR will be smaller/simpler
14686 - later -> can work on more instructions
14687 - Handling of valuetypes:
14688 - When a vtype is pushed on the stack, a new temporary is created, an
14689 instruction computing its address (LDADDR) is emitted and pushed on
14690 the stack. Need to optimize cases when the vtype is used immediately as in
14691 argument passing, stloc etc.
14692 - Instead of the to_end stuff in the old JIT, simply call the function handling
14693 the values on the stack before emitting the last instruction of the bb.
14696 #endif /* DISABLE_JIT */