2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/*
 * Inlining heuristics and CFG-failure helper macros.  Each *_FAILURE macro
 * records a failure reason on the implicit local `cfg` and jumps to the
 * method-to-ir `exception_exit` (or `mono_error_exit`) label.
 * NOTE(review): this extract is truncated — the `} while (0)` closers of
 * these macros (and some interior lines) are not visible here; the code
 * below is kept byte-identical to the extract.
 */
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
/* Immediate-operand opcode variants; exported (non-static) — see mini.h */
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/* Core inliner entry point; definition appears later in the file. */
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
/* Cached icall signatures, built once by mono_create_helper_signatures () below. */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
/*
 * The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * "mini-ops.h" so the same opcode list expands into different per-opcode
 * tables (register kinds, then sreg counts).
 * NOTE(review): extract is truncated — the #undef lines and the first
 * table's declaration are not visible here.
 */
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
169 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
/* Second expansion: number of source registers used by each opcode. */
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
/* Initialize a MonoMethodVar: 0xffff marks "no first use recorded yet". */
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the static alloc_*reg () virtual-register
 * allocators, so other compilation units can allocate vregs.
 * NOTE(review): extract is truncated — return types and braces of these
 * one-line wrappers are not visible here.
 */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
/* Allocate a destination register matching the given eval-stack type. */
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC tracking kind (ref / managed-pointer / plain) of VREG. */
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map a MonoType to the move opcode used for values of that type
 * (integer/long/float/vtype moves).  Enums are mapped through their base
 * type, generic instances through their container class.
 * NOTE(review): extract is truncated — byref handling, many case labels,
 * return statements and braces are not visible here.
 */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* Enum value types take the move of their underlying base type. */
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
/* NOTE(review): message says "type_to_regstore" although this function is
 * mono_type_to_regmove — looks like a stale name; confirm before fixing. */
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug dump of one basic block: its in/out edges (block number and
 * depth-first number) followed by every instruction in the block.
 */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Build the cached icall signatures declared above.  Signature strings
 * are "<ret> <arg>..." as parsed by mono_create_icall_signature ().
 */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * break_on_unverified:
 *
 *   Debug hook — lets a developer trap into the debugger when unverified
 * IL is hit, controlled by the 'break_on_unverified' debug option.
 * NOTE(review): extract is truncated — the breakpoint statement itself is
 * not visible here.
 */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *
 *   Record a MONO_EXCEPTION_METHOD_ACCESS on CFG with a message naming
 * both methods.  cfg->exception_message owns the g_strdup_printf result.
 */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/*
 * field_access_failure:
 *
 *   Same as method_access_failure () but for an inaccessible field.
 */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/*
 * inline_failure:
 *
 *   Record an inlining failure (used via the INLINE_FAILURE macro above).
 */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record a gsharedvt compilation failure.  FILE/LINE are the caller's
 * location, passed in by the GSHAREDVT_FAILURE macro.  The message is
 * stored in cfg->exception_message (owned by the cfg).
 */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
/*
 * IR-emission helper macros used by the main CIL decode loop.  They all
 * assume the decode loop's locals (cfg, ins, sp, bblock, ip, target, ...)
 * are in scope.  UNVERIFIED aborts verification (or falls back to a
 * non-shared instantiation under gsharedvt); GET_BBLOCK finds or creates
 * the basic block for an IL offset; ADD_BINOP/ADD_UNOP/ADD_BINCOND emit
 * typed arithmetic/branch instructions via type_from_op () below.
 * NOTE(review): extract is truncated — macro closers and several interior
 * lines are missing; the code below is kept byte-identical.
 */
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 #if SIZEOF_REGISTER == 8
463 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
464 /* FIXME: Need to add many more cases */ \
465 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
467 int dr = alloc_preg (cfg); \
468 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
469 (ins)->sreg2 = widen->dreg; \
473 #define ADD_WIDEN_OP(ins, arg1, arg2)
476 #define ADD_BINOP(op) do { \
477 MONO_INST_NEW (cfg, ins, (op)); \
479 ins->sreg1 = sp [0]->dreg; \
480 ins->sreg2 = sp [1]->dreg; \
481 type_from_op (ins, sp [0], sp [1]); \
483 /* Have to insert a widening op */ \
484 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
485 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
486 MONO_ADD_INS ((cfg)->cbb, (ins)); \
487 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
490 #define ADD_UNOP(op) do { \
491 MONO_INST_NEW (cfg, ins, (op)); \
493 ins->sreg1 = sp [0]->dreg; \
494 type_from_op (ins, sp [0], NULL); \
496 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
497 MONO_ADD_INS ((cfg)->cbb, (ins)); \
498 *sp++ = mono_decompose_opcode (cfg, ins); \
501 #define ADD_BINCOND(next_block) do { \
504 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
505 cmp->sreg1 = sp [0]->dreg; \
506 cmp->sreg2 = sp [1]->dreg; \
507 type_from_op (cmp, sp [0], sp [1]); \
509 type_from_op (ins, sp [0], sp [1]); \
510 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
511 GET_BBLOCK (cfg, tblock, target); \
512 link_bblock (cfg, bblock, tblock); \
513 ins->inst_true_bb = tblock; \
514 if ((next_block)) { \
515 link_bblock (cfg, bblock, (next_block)); \
516 ins->inst_false_bb = (next_block); \
517 start_new_bblock = 1; \
519 GET_BBLOCK (cfg, tblock, ip); \
520 link_bblock (cfg, bblock, tblock); \
521 ins->inst_false_bb = tblock; \
522 start_new_bblock = 2; \
524 if (sp != stack_start) { \
525 handle_stack_args (cfg, stack_start, sp - stack_start); \
526 CHECK_UNVERIFIABLE (cfg); \
528 MONO_ADD_INS (bblock, cmp); \
529 MONO_ADD_INS (bblock, ins); \
533 * link_bblock: Links two basic blocks
535 * links two basic blocks in the control flow graph, the 'from'
536 * argument is the starting block and the 'to' argument is the block
537 * the control flow ends to after 'from'.
/*
 * Adds 'to' to from->out_bb and 'from' to to->in_bb, growing the
 * mempool-allocated edge arrays.  Existing edges are not duplicated.
 * NOTE(review): extract is truncated — edge-duplicate early-outs, the
 * count updates and braces are not fully visible here.
 */
540 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 MonoBasicBlock **newa;
/* Verbose tracing of the CFG edge being added (entry/exit have no cil_code). */
546 if (from->cil_code) {
548 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
550 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
553 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
555 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
560 for (i = 0; i < from->out_count; ++i) {
561 if (to == from->out_bb [i]) {
567 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
568 for (i = 0; i < from->out_count; ++i) {
569 newa [i] = from->out_bb [i];
/* Same for the reverse (in-) edge. */
577 for (i = 0; i < to->in_count; ++i) {
578 if (from == to->in_bb [i]) {
584 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
585 for (i = 0; i < to->in_count; ++i) {
586 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
595 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
597 link_bblock (cfg, from, to);
601 * mono_find_block_region:
603 * We mark each basic block with a region ID. We use that to avoid BB
604 * optimizations when blocks are in different regions.
607 * A region token that encodes where this region is, and information
608 * about the clause owner for this block.
610 * The region encodes the try/catch/filter clause that owns this block
611 * as well as the type. -1 is a special value that represents a block
612 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause->flags. */
615 mono_find_block_region (MonoCompile *cfg, int offset)
617 MonoMethodHeader *header = cfg->header;
618 MonoExceptionClause *clause;
621 for (i = 0; i < header->num_clauses; ++i) {
622 clause = &header->clauses [i];
/* Filter expressions live between filter_offset and handler_offset. */
623 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
624 (offset < (clause->handler_offset)))
625 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
627 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
628 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
629 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
630 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
631 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
633 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
636 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
637 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the clauses of the given TYPE whose protected range contains
 * IP but not TARGET, i.e. the handlers a branch from IP to TARGET leaves.
 * Returns a GList of MonoExceptionClause* (list owned by the caller).
 */
644 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
654 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
655 if (clause->flags == type)
656 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Get or lazily create the stack-pointer variable for an EH region,
 * cached in cfg->spvars keyed by region token.
 */
663 mono_create_spvar_for_region (MonoCompile *cfg, int region)
667 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
671 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
672 /* prevent it from being register allocated */
673 var->flags |= MONO_INST_VOLATILE;
675 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
679 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
681 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Get or lazily create the exception-object variable for a handler at
 * the given IL offset, cached in cfg->exvars.
 */
685 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
689 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
693 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
694 /* prevent it from being register allocated */
695 var->flags |= MONO_INST_VOLATILE;
697 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
703 * Returns the type used in the eval stack when @type is loaded.
704 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and inst->klass
 * for a value of @type loaded onto the evaluation stack.
 * NOTE(review): extract is truncated — many case labels, breaks and the
 * byref branch structure are not fully visible here.
 */
707 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
711 type = mini_replace_type (type);
712 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
714 inst->type = STACK_MP;
719 switch (type->type) {
721 inst->type = STACK_INV;
725 case MONO_TYPE_BOOLEAN:
731 inst->type = STACK_I4;
736 case MONO_TYPE_FNPTR:
737 inst->type = STACK_PTR;
739 case MONO_TYPE_CLASS:
740 case MONO_TYPE_STRING:
741 case MONO_TYPE_OBJECT:
742 case MONO_TYPE_SZARRAY:
743 case MONO_TYPE_ARRAY:
744 inst->type = STACK_OBJ;
748 inst->type = STACK_I8;
752 inst->type = STACK_R8;
/* Enums are loaded as their underlying base type. */
754 case MONO_TYPE_VALUETYPE:
755 if (type->data.klass->enumtype) {
756 type = mono_class_enum_basetype (type->data.klass);
760 inst->type = STACK_VTYPE;
763 case MONO_TYPE_TYPEDBYREF:
764 inst->klass = mono_defaults.typed_reference_class;
765 inst->type = STACK_VTYPE;
767 case MONO_TYPE_GENERICINST:
768 type = &type->data.generic_class->container_class->byval_arg;
/* Generic type variables: vtype under gsharedvt, otherwise a reference. */
772 g_assert (cfg->generic_sharing_context);
773 if (mini_is_gsharedvt_type (cfg, type)) {
774 g_assert (cfg->gsharedvt);
775 inst->type = STACK_VTYPE;
777 inst->type = STACK_OBJ;
781 g_error ("unknown type 0x%02x in eval stack type", type->type);
786 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Each table is indexed by the STACK_* type of the operand(s); STACK_INV
 * marks an invalid operand combination.  The *_op_map tables hold the
 * delta from the generic CEE_/OP_ opcode to its type-specific variant.
 * NOTE(review): extract is truncated — the `static const ...` declarations
 * and closing braces of several tables are not visible here.
 */
789 bin_num_table [STACK_MAX] [STACK_MAX] = {
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
793 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
794 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
795 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
796 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
797 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a unary negation, per operand stack type. */
802 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
805 /* reduce the size of this table */
807 bin_int_table [STACK_MAX] [STACK_MAX] = {
808 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
809 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility: 0 = invalid, non-zero = allowed combination. */
819 bin_comp_table [STACK_MAX] [STACK_MAX] = {
820 /* Inv i L p F & O vt */
822 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
823 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
824 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
825 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
826 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
827 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
828 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
831 /* reduce the size of this table */
833 shift_table [STACK_MAX] [STACK_MAX] = {
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
845 * Tables to map from the non-specific opcode to the matching
846 * type-specific opcode.
848 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
850 binops_op_map [STACK_MAX] = {
851 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
854 /* handles from CEE_NEG to CEE_CONV_U8 */
856 unops_op_map [STACK_MAX] = {
857 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
860 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
862 ovfops_op_map [STACK_MAX] = {
863 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
866 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
868 ovf2ops_op_map [STACK_MAX] = {
869 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
872 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
874 ovf3ops_op_map [STACK_MAX] = {
875 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
878 /* handles from CEE_BEQ to CEE_BLT_UN */
880 beqops_op_map [STACK_MAX] = {
881 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
884 /* handles from CEE_CEQ to CEE_CLT_UN */
886 ceqops_op_map [STACK_MAX] = {
887 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
891 * Sets ins->type (the type on the eval stack) according to the
892 * type of the opcode and the arguments to it.
893 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
895 * FIXME: this function sets ins->type unconditionally in some cases, but
896 * it should set it to invalid for some types (a conv.x on an object)
/*
 * Also specializes the generic opcode to its typed variant by adding the
 * delta from the *_op_map tables above.
 * NOTE(review): extract is truncated — most case labels, breaks and
 * braces of this switch are not visible here; code kept byte-identical.
 */
899 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
901 switch (ins->opcode) {
908 /* FIXME: check unverifiable args for STACK_MP */
909 ins->type = bin_num_table [src1->type] [src2->type];
910 ins->opcode += binops_op_map [ins->type];
917 ins->type = bin_int_table [src1->type] [src2->type];
918 ins->opcode += binops_op_map [ins->type];
923 ins->type = shift_table [src1->type] [src2->type];
924 ins->opcode += binops_op_map [ins->type];
/* Compares: pick long/float/int compare from the first operand's type. */
929 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
930 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
931 ins->opcode = OP_LCOMPARE;
932 else if (src1->type == STACK_R8)
933 ins->opcode = OP_FCOMPARE;
935 ins->opcode = OP_ICOMPARE;
/* Immediate compares have a single stack operand, hence src1 twice. */
937 case OP_ICOMPARE_IMM:
938 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
939 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
940 ins->opcode = OP_LCOMPARE_IMM;
952 ins->opcode += beqops_op_map [src1->type];
955 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
956 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered compares only allow combinations marked with bit 0. */
962 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
963 ins->opcode += ceqops_op_map [src1->type];
967 ins->type = neg_table [src1->type];
968 ins->opcode += unops_op_map [ins->type];
971 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
972 ins->type = src1->type;
974 ins->type = STACK_INV;
975 ins->opcode += unops_op_map [ins->type];
981 ins->type = STACK_I4;
982 ins->opcode += unops_op_map [src1->type];
985 ins->type = STACK_R8;
986 switch (src1->type) {
989 ins->opcode = OP_ICONV_TO_R_UN;
992 ins->opcode = OP_LCONV_TO_R_UN;
996 case CEE_CONV_OVF_I1:
997 case CEE_CONV_OVF_U1:
998 case CEE_CONV_OVF_I2:
999 case CEE_CONV_OVF_U2:
1000 case CEE_CONV_OVF_I4:
1001 case CEE_CONV_OVF_U4:
1002 ins->type = STACK_I4;
1003 ins->opcode += ovf3ops_op_map [src1->type];
1005 case CEE_CONV_OVF_I_UN:
1006 case CEE_CONV_OVF_U_UN:
1007 ins->type = STACK_PTR;
1008 ins->opcode += ovf2ops_op_map [src1->type];
1010 case CEE_CONV_OVF_I1_UN:
1011 case CEE_CONV_OVF_I2_UN:
1012 case CEE_CONV_OVF_I4_UN:
1013 case CEE_CONV_OVF_U1_UN:
1014 case CEE_CONV_OVF_U2_UN:
1015 case CEE_CONV_OVF_U4_UN:
1016 ins->type = STACK_I4;
1017 ins->opcode += ovf2ops_op_map [src1->type];
1020 ins->type = STACK_PTR;
1021 switch (src1->type) {
1023 ins->opcode = OP_ICONV_TO_U;
1027 #if SIZEOF_VOID_P == 8
1028 ins->opcode = OP_LCONV_TO_U;
1030 ins->opcode = OP_MOVE;
1034 ins->opcode = OP_LCONV_TO_U;
1037 ins->opcode = OP_FCONV_TO_U;
1043 ins->type = STACK_I8;
1044 ins->opcode += unops_op_map [src1->type];
1046 case CEE_CONV_OVF_I8:
1047 case CEE_CONV_OVF_U8:
1048 ins->type = STACK_I8;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_U8_UN:
1052 case CEE_CONV_OVF_I8_UN:
1053 ins->type = STACK_I8;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1058 ins->type = STACK_R8;
1059 ins->opcode += unops_op_map [src1->type];
1062 ins->type = STACK_R8;
1066 ins->type = STACK_I4;
1067 ins->opcode += ovfops_op_map [src1->type];
1070 case CEE_CONV_OVF_I:
1071 case CEE_CONV_OVF_U:
1072 ins->type = STACK_PTR;
1073 ins->opcode += ovfops_op_map [src1->type];
/* Overflow arithmetic is not defined for floats. */
1076 case CEE_ADD_OVF_UN:
1078 case CEE_MUL_OVF_UN:
1080 case CEE_SUB_OVF_UN:
1081 ins->type = bin_num_table [src1->type] [src2->type];
1082 ins->opcode += ovfops_op_map [src1->type];
1083 if (ins->type == STACK_R8)
1084 ins->type = STACK_INV;
1086 case OP_LOAD_MEMBASE:
1087 ins->type = STACK_PTR;
1089 case OP_LOADI1_MEMBASE:
1090 case OP_LOADU1_MEMBASE:
1091 case OP_LOADI2_MEMBASE:
1092 case OP_LOADU2_MEMBASE:
1093 case OP_LOADI4_MEMBASE:
1094 case OP_LOADU4_MEMBASE:
1095 ins->type = STACK_PTR;
1097 case OP_LOADI8_MEMBASE:
1098 ins->type = STACK_I8;
1100 case OP_LOADR4_MEMBASE:
1101 case OP_LOADR8_MEMBASE:
1102 ins->type = STACK_R8;
1105 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1109 if (ins->type == STACK_MP)
1110 ins->klass = mono_defaults.object_class;
/* Table fragment mapping MONO_TYPE_* to STACK_* (declaration not visible
 * in this extract). */
1115 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1121 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the eval-stack types of ARGS are compatible with SIG's
 * parameter types (byref-ness, reference vs. value, float kinds).
 * NOTE(review): extract is truncated — return statements, several case
 * labels and braces are not visible here.
 */
1126 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1130 switch (args->type) {
1140 for (i = 0; i < sig->param_count; ++i) {
1141 switch (args [i].type) {
1145 if (!sig->params [i]->byref)
1149 if (sig->params [i]->byref)
1151 switch (sig->params [i]->type) {
1152 case MONO_TYPE_CLASS:
1153 case MONO_TYPE_STRING:
1154 case MONO_TYPE_OBJECT:
1155 case MONO_TYPE_SZARRAY:
1156 case MONO_TYPE_ARRAY:
1163 if (sig->params [i]->byref)
1165 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1174 /*if (!param_table [args [i].type] [sig->params [i]->type])
1182 * When we need a pointer to the current domain many times in a method, we
1183 * call mono_domain_get() once and we store the result in a local variable.
1184 * This function returns the variable that represents the MonoDomain*.
1186 inline static MonoInst *
1187 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created and cached on the cfg. */
1189 if (!cfg->domainvar)
1190 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1191 return cfg->domainvar;
1195 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful on architectures that define MONO_ARCH_NEED_GOT_VAR. */
1199 mono_get_got_var (MonoCompile *cfg)
1201 #ifdef MONO_ARCH_NEED_GOT_VAR
1202 if (!cfg->compile_aot)
1204 if (!cfg->got_var) {
1205 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1207 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Lazily create the variable holding the runtime generic context / rgctx;
 * only valid for generic-shared compilations.
 */
1214 mono_get_vtable_var (MonoCompile *cfg)
1216 g_assert (cfg->generic_sharing_context);
1218 if (!cfg->rgctx_var) {
1219 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 /* force the var to be stack allocated */
1221 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1224 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Inverse of type_to_eval_stack_type (): map an instruction's eval-stack
 * type back to a MonoType (using ins->klass for MP/OBJ/VTYPE cases).
 */
1228 type_from_stack_type (MonoInst *ins) {
1229 switch (ins->type) {
1230 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1231 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1232 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1233 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1235 return &ins->klass->this_arg;
1236 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1237 case STACK_VTYPE: return &ins->klass->byval_arg;
1239 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType to its STACK_* eval-stack kind.
 * NOTE(review): extract is truncated — most case labels and return
 * statements are not visible here.
 */
1244 static G_GNUC_UNUSED int
1245 type_to_stack_type (MonoType *t)
1247 t = mono_type_get_underlying_type (t);
1251 case MONO_TYPE_BOOLEAN:
1254 case MONO_TYPE_CHAR:
1261 case MONO_TYPE_FNPTR:
1263 case MONO_TYPE_CLASS:
1264 case MONO_TYPE_STRING:
1265 case MONO_TYPE_OBJECT:
1266 case MONO_TYPE_SZARRAY:
1267 case MONO_TYPE_ARRAY:
1275 case MONO_TYPE_VALUETYPE:
1276 case MONO_TYPE_TYPEDBYREF:
1278 case MONO_TYPE_GENERICINST:
1279 if (mono_type_generic_inst_is_valuetype (t))
1285 g_assert_not_reached ();
1292 array_access_to_klass (int opcode)
1296 return mono_defaults.byte_class;
1298 return mono_defaults.uint16_class;
1301 return mono_defaults.int_class;
1304 return mono_defaults.sbyte_class;
1307 return mono_defaults.int16_class;
1310 return mono_defaults.int32_class;
1312 return mono_defaults.uint32_class;
1315 return mono_defaults.int64_class;
1318 return mono_defaults.single_class;
1321 return mono_defaults.double_class;
1322 case CEE_LDELEM_REF:
1323 case CEE_STELEM_REF:
1324 return mono_defaults.object_class;
1326 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return a local variable used to carry the value in stack slot 'slot'
 * across basic-block boundaries, reusing a cached var (cfg->intvars,
 * keyed by stack type + slot) when possible so join points share storage.
 * NOTE(review): elided extraction — leading numbers are original line
 * numbers; the switch cases and the trailing return are missing here.
 */
1332 * We try to share variables when possible
1335 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1340 /* inlining can result in deeper stacks */
1341 if (slot >= cfg->header->max_stack)
1342 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1344 pos = ins->type - 1 + slot * STACK_MAX;
1346 switch (ins->type) {
1353 if ((vnum = cfg->intvars [pos]))
1354 return cfg->varinfo [vnum];
1355 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1356 cfg->intvars [pos] = res->inst_c0;
1359 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, remember the image+token a key (method/field/etc.)
 * was loaded from, so the AOT compiler can emit a token reference later.
 * No-op outside AOT or when a generic context is active.
 */
1365 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1368 * Don't use this if a generic_context is set, since that means AOT can't
1369 * look up the method using just the image+token.
1370 * table == 0 means this is a reference made from a wrapper.
1372 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1373 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1374 jump_info_token->image = image;
1375 jump_info_token->token = token;
1376 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 * Spill the 'count' values in sp[] to locals at a basic-block boundary so
 * the successor blocks can reload them (handle_loaded_temps). Successors
 * at the same join point share the in_stack/out_stack variable arrays.
 * NOTE(review): elided extraction — leading numbers are original line
 * numbers; 'continue' statements, closing braces and a few lines between
 * the visible ones are missing from this view.
 */
1381 * This function is called to handle items that are left on the evaluation stack
1382 * at basic block boundaries. What happens is that we save the values to local variables
1383 * and we reload them later when first entering the target basic block (with the
1384 * handle_loaded_temps () function).
1385 * A single join point will use the same variables (stored in the array bb->out_stack or
1386 * bb->in_stack, if the basic block is before or after the join point).
1388 * This function needs to be called _before_ emitting the last instruction of
1389 * the bb (i.e. before emitting a branch).
1390 * If the stack merge fails at a join point, cfg->unverifiable is set.
1393 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1396 MonoBasicBlock *bb = cfg->cbb;
1397 MonoBasicBlock *outb;
1398 MonoInst *inst, **locals;
1403 if (cfg->verbose_level > 3)
1404 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or reuse a successor's) out_stack array. */
1405 if (!bb->out_scount) {
1406 bb->out_scount = count;
1407 //printf ("bblock %d has out:", bb->block_num);
1409 for (i = 0; i < bb->out_count; ++i) {
1410 outb = bb->out_bb [i];
1411 /* exception handlers are linked, but they should not be considered for stack args */
1412 if (outb->flags & BB_EXCEPTION_HANDLER)
1414 //printf (" %d", outb->block_num);
1415 if (outb->in_stack) {
1417 bb->out_stack = outb->in_stack;
1423 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1424 for (i = 0; i < count; ++i) {
1426 * try to reuse temps already allocated for this purpose, if they occupy the same
1427 * stack slot and if they are of the same type.
1428 * This won't cause conflicts since if 'local' is used to
1429 * store one of the values in the in_stack of a bblock, then
1430 * the same variable will be used for the same outgoing stack
1432 * This doesn't work when inlining methods, since the bblocks
1433 * in the inlined methods do not inherit their in_stack from
1434 * the bblock they are inlined to. See bug #58863 for an
1437 if (cfg->inlined_method)
1438 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1440 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen array to every (non-handler) successor. */
1445 for (i = 0; i < bb->out_count; ++i) {
1446 outb = bb->out_bb [i];
1447 /* exception handlers are linked, but they should not be considered for stack args */
1448 if (outb->flags & BB_EXCEPTION_HANDLER)
1450 if (outb->in_scount) {
1451 if (outb->in_scount != bb->out_scount) {
1452 cfg->unverifiable = TRUE;
1455 continue; /* check they are the same locals */
1457 outb->in_scount = count;
1458 outb->in_stack = bb->out_stack;
/* Emit the actual stores of sp[] into the shared locals. */
1461 locals = bb->out_stack;
1463 for (i = 0; i < count; ++i) {
1464 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1465 inst->cil_code = sp [i]->cil_code;
1466 sp [i] = locals [i];
1467 if (cfg->verbose_level > 3)
1468 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1472 * It is possible that the out bblocks already have in_stack assigned, and
1473 * the in_stacks differ. In this case, we will store to all the different
1480 /* Find a bblock which has a different in_stack */
1482 while (bindex < bb->out_count) {
1483 outb = bb->out_bb [bindex];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER) {
1489 if (outb->in_stack != locals) {
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1497 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 * Load interface_offsets[klass->interface_id] into intf_reg; the offsets
 * array lives at negative offsets just before the vtable. Under AOT the
 * (adjusted) interface id is patched in at load time instead of inlined.
 */
1506 /* Emit code which loads interface_offsets [klass->interface_id]
1507 * The array is stored in memory before vtable.
1510 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1512 if (cfg->compile_aot) {
1513 int ioffset_reg = alloc_preg (cfg);
1514 int iid_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1517 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT path: the offset -((iid + 1) * ptr-size) is a compile-time constant. */
1521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Set intf_bit_reg nonzero iff bit klass->interface_id is set in the
 * interface bitmap found at base_reg+offset. Three strategies: a runtime
 * icall when the bitmap is compressed, a bit-extraction sequence when the
 * iid is only known at load time (AOT), or a constant byte/mask test (JIT).
 * NOTE(review): elided extraction — leading numbers are original line
 * numbers; some declarations and braces are missing from this view.
 */
1526 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1528 int ibitmap_reg = alloc_preg (cfg);
1529 #ifdef COMPRESSED_INTERFACE_BITMAP
1531 MonoInst *res, *ins;
1532 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1533 MONO_ADD_INS (cfg->cbb, ins);
1535 if (cfg->compile_aot)
1536 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1538 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1539 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1540 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1542 int ibitmap_byte_reg = alloc_preg (cfg);
1544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1546 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1547 int iid_reg = alloc_preg (cfg);
1548 int shifted_iid_reg = alloc_preg (cfg);
1549 int ibitmap_byte_address_reg = alloc_preg (cfg);
1550 int masked_iid_reg = alloc_preg (cfg);
1551 int iid_one_bit_reg = alloc_preg (cfg);
1552 int iid_bit_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1556 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1558 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/* Thin wrapper: run the interface-bitmap check against a MonoClass. */
1569 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1570 * stored in "klass_reg" implements the interface "klass".
1573 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1575 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/* Thin wrapper: run the interface-bitmap check against a MonoVTable. */
1579 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1580 * stored in "vtable_reg" implements the interface "klass".
1583 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1585 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 * Guard that klass->interface_id <= max_iid_reg before probing the bitmap;
 * on failure either branch to false_target or throw InvalidCastException.
 */
1589 * Emit code which checks whenever the interface id of @klass is smaller
1590 * than the value given by max_iid_reg.
1593 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1594 MonoBasicBlock *false_target)
1596 if (cfg->compile_aot) {
1597 int iid_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* false_target != NULL -> branch; NULL -> throw (cast path). */
1604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1606 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Load max_interface_id from a vtable, then delegate to the iid check. */
1609 /* Same as above, but obtains max_iid from a vtable */
1611 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 int max_iid_reg = alloc_preg (cfg);
1616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1617 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Load max_interface_id from a MonoClass, then delegate to the iid check. */
1620 /* Same as above, but obtains max_iid from a klass */
1622 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1623 MonoBasicBlock *false_target)
1625 int max_iid_reg = alloc_preg (cfg);
1627 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1628 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * isinst-style subclass test via the supertypes table: load
 * supertypes[klass->idepth - 1] from the runtime class in klass_reg and
 * compare it against 'klass' (as a register value, AOT const, or inlined
 * pointer). Branch to true_target on match, false_target on depth miss.
 * NOTE(review): elided extraction — an idepth guard branch and closing
 * braces are missing between the visible lines.
 */
1632 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1634 int idepth_reg = alloc_preg (cfg);
1635 int stypes_reg = alloc_preg (cfg);
1636 int stype = alloc_preg (cfg);
1638 mono_class_setup_supertypes (klass);
/* Only check idepth at runtime if it can exceed the inline supertable. */
1640 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1648 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1649 } else if (cfg->compile_aot) {
1650 int const_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1652 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no runtime klass instruction. */
1660 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1662 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Interface test from a vtable register: iid range check, then bitmap
 * probe; branch to true_target on hit, else (when targets are NULL)
 * throw InvalidCastException.
 */
1666 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 int intf_reg = alloc_preg (cfg);
1670 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1671 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1676 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Same interface test, but starting from a MonoClass* register. */
1680 * Variant of the above that takes a register to the class, not the vtable.
1683 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1685 int intf_bit_reg = alloc_preg (cfg);
1687 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1688 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1693 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Exact-class equality check: compare klass_reg against klass_inst's
 * register, an AOT class constant, or the inlined pointer; throw
 * InvalidCastException on mismatch.
 * NOTE(review): the leading 'if (klass_inst) {' line is elided here.
 */
1697 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1700 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1701 } else if (cfg->compile_aot) {
1702 int const_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1704 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check without a runtime klass inst. */
1712 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1714 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare klass_reg to 'klass' (AOT const or inlined pointer) and branch
 * to 'target' with the caller-supplied branch opcode.
 */
1718 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1720 if (cfg->compile_aot) {
1721 int const_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1723 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: castclass is mutually recursive with the array path below. */
1731 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a full castclass check, throwing InvalidCastException on failure.
 * Array types: verify rank, then recursively check the element (cast)
 * class, with special cases for enum/object element types and a bounds
 * check to ensure SZARRAY targets only accept vectors. Non-array types:
 * walk the supertypes table like the isinst helper, but throw instead of
 * branching.
 * NOTE(review): elided extraction — the 'if (klass->rank)' guard and
 * several closing braces are missing between the visible lines.
 */
1734 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1737 int rank_reg = alloc_preg (cfg);
1738 int eclass_reg = alloc_preg (cfg);
1740 g_assert (!klass_inst);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1744 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1746 if (klass->cast_class == mono_defaults.object_class) {
1747 int parent_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1749 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1750 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1751 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1752 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1753 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1754 } else if (klass->cast_class == mono_defaults.enum_class) {
1755 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1756 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1757 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1759 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1760 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1763 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1764 /* Check that the object is a vector too */
1765 int bounds_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1768 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, throwing on failure. */
1771 int idepth_reg = alloc_preg (cfg);
1772 int stypes_reg = alloc_preg (cfg);
1773 int stype = alloc_preg (cfg);
1775 mono_class_setup_supertypes (klass);
1777 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1780 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1784 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without a runtime klass instruction. */
1789 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1791 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR to zero 'size' bytes at destreg+offset (only val == 0 is
 * supported). Small aligned sizes use a single immediate store; otherwise
 * a zero register is stored in the widest chunks the alignment allows.
 * NOTE(review): elided extraction — the switch headers, 'size -= N' /
 * 'offset += N' bookkeeping and loop conditions between these stores are
 * missing from this view.
 */
1795 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1799 g_assert (val == 0);
1804 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1807 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1810 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1815 #if SIZEOF_REGISTER == 8
1817 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1823 val_reg = alloc_preg (cfg);
1825 if (SIZEOF_REGISTER == 8)
1826 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1828 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1831 /* This could be optimized further if necessary */
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit an inline, fully unrolled copy of 'size' bytes from srcreg+soffset
 * to destreg+doffset, using the widest load/store pairs the alignment
 * permits and byte copies for the remainder / unaligned case.
 * NOTE(review): elided extraction — loop headers and the size/offset
 * bookkeeping between these load/store pairs are missing from this view.
 */
1873 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1880 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1881 g_assert (size < 10000);
/* Unaligned case: copy byte by byte. */
1884 /* This could be optimized further if necessary */
1886 cur_reg = alloc_preg (cfg);
1887 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1895 #if !NO_UNALIGNED_ACCESS
1896 if (SIZEOF_REGISTER == 8) {
1898 cur_reg = alloc_preg (cfg);
1899 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1909 cur_reg = alloc_preg (cfg);
1910 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1925 cur_reg = alloc_preg (cfg);
1926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Store sreg1 into the TLS slot identified by tls_key. Under AOT the
 * offset is resolved at load time (OP_TLS_SET_REG); the JIT path bakes
 * in the offset from mini_get_tls_offset ().
 */
1935 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1939 if (cfg->compile_aot) {
1940 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1941 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1943 ins->sreg2 = c->dreg;
1944 MONO_ADD_INS (cfg->cbb, ins);
1946 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1948 ins->inst_offset = mini_get_tls_offset (tls_key);
1949 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 * Emit IR linking cfg->lmf_var into the thread's LMF (Last Managed Frame)
 * list. Fast path stores directly via TLS; otherwise lmf_addr is obtained
 * from the jit_tls intrinsic, an inlined pthread_getspecific (AOT), or the
 * mono_get_lmf_addr icall, cached in cfg->lmf_addr_var.
 * NOTE(review): elided extraction — the #ifdef/else branch structure
 * selecting between these strategies is partially missing from this view.
 */
1956 * Emit IR to push the current LMF onto the LMF stack.
1959 emit_push_lmf (MonoCompile *cfg)
1962 * Emit IR to push the LMF:
1963 * lmf_addr = <lmf_addr from tls>
1964 * lmf->lmf_addr = lmf_addr
1965 * lmf->prev_lmf = *lmf_addr
1968 int lmf_reg, prev_lmf_reg;
1969 MonoInst *ins, *lmf_ins;
1974 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1975 /* Load current lmf */
1976 lmf_ins = mono_get_lmf_intrinsic (cfg);
1978 MONO_ADD_INS (cfg->cbb, lmf_ins);
1979 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1980 lmf_reg = ins->dreg;
1981 /* Save previous_lmf */
1982 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make this frame the current LMF via TLS. */
1984 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1987 * Store lmf_addr in a variable, so it can be allocated to a global register.
1989 if (!cfg->lmf_addr_var)
1990 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1993 ins = mono_get_jit_tls_intrinsic (cfg);
1995 int jit_tls_dreg = ins->dreg;
1997 MONO_ADD_INS (cfg->cbb, ins);
1998 lmf_reg = alloc_preg (cfg);
1999 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2001 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2004 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2006 MONO_ADD_INS (cfg->cbb, lmf_ins);
2009 MonoInst *args [16], *jit_tls_ins, *ins;
2011 /* Inline mono_get_lmf_addr () */
2012 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2014 /* Load mono_jit_tls_id */
2015 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2016 /* call pthread_getspecific () */
2017 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2018 /* lmf_addr = &jit_tls->lmf */
2019 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2022 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2028 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2029 lmf_reg = ins->dreg;
/* Link: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2031 prev_lmf_reg = alloc_preg (cfg);
2032 /* Save previous_lmf */
2033 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2034 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2036 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 * Emit IR unlinking cfg->lmf_var from the thread's LMF list, mirroring
 * emit_push_lmf: either restore previous_lmf into TLS directly, or write
 * it back through the cached lmf_addr variable.
 */
2043 * Emit IR to pop the current LMF from the LMF stack.
2046 emit_pop_lmf (MonoCompile *cfg)
2048 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2054 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2055 lmf_reg = ins->dreg;
2057 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2058 /* Load previous_lmf */
2059 prev_lmf_reg = alloc_preg (cfg);
2060 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2062 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2065 * Emit IR to pop the LMF:
2066 * *(lmf->lmf_addr) = lmf->prev_lmf
2068 /* This could be called before emit_push_lmf () */
2069 if (!cfg->lmf_addr_var)
2070 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2071 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2073 prev_lmf_reg = alloc_preg (cfg);
2074 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2075 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall (func) passing the compiled method.
 * Skipped for inlined bodies and when enter/leave profiling is disabled.
 */
2080 emit_instrumentation_call (MonoCompile *cfg, void *func)
2082 MonoInst *iargs [1];
2085 * Avoid instrumenting inlined methods since it can
2086 * distort profiling results.
2088 if (cfg->method != cfg->current_method)
2091 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2092 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2093 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Pick the call opcode family (CALL / VOIDCALL / LCALL / FCALL / VCALL)
 * from the return type, then the REG/MEMBASE/plain variant from the
 * calli/virt flags. Enums and generic insts are retried via 'goto'
 * targets elided from this view.
 */
2098 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2101 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2104 type = mini_get_basic_type_from_generic (gsctx, type);
2105 type = mini_replace_type (type);
2106 switch (type->type) {
2107 case MONO_TYPE_VOID:
2108 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2111 case MONO_TYPE_BOOLEAN:
2114 case MONO_TYPE_CHAR:
2117 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2121 case MONO_TYPE_FNPTR:
2122 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2123 case MONO_TYPE_CLASS:
2124 case MONO_TYPE_STRING:
2125 case MONO_TYPE_OBJECT:
2126 case MONO_TYPE_SZARRAY:
2127 case MONO_TYPE_ARRAY:
2128 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2131 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2134 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2135 case MONO_TYPE_VALUETYPE:
2136 if (type->data.klass->enumtype) {
2137 type = mono_class_enum_basetype (type->data.klass);
2140 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2141 case MONO_TYPE_TYPEDBYREF:
2142 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2143 case MONO_TYPE_GENERICINST:
2144 type = &type->data.generic_class->container_class->byval_arg;
2147 case MONO_TYPE_MVAR:
2149 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2151 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * Verify an evaluation-stack value against a storage target type.
 * Returns non-zero when the store would be invalid; 'return 0/1' lines
 * between the visible checks are elided in this extraction.
 */
2157 * target_type_is_incompatible:
2158 * @cfg: MonoCompile context
2160 * Check that the item @arg on the evaluation stack can be stored
2161 * in the target type (can be a local, or field, etc).
2162 * The cfg arg can be used to check if we need verification or just
2165 * Returns: non-0 value if arg can't be stored on a target.
2168 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2170 MonoType *simple_type;
2173 target = mini_replace_type (target);
2174 if (target->byref) {
2175 /* FIXME: check that the pointed to types match */
2176 if (arg->type == STACK_MP)
2177 return arg->klass != mono_class_from_mono_type (target);
2178 if (arg->type == STACK_PTR)
2183 simple_type = mono_type_get_underlying_type (target);
2184 switch (simple_type->type) {
2185 case MONO_TYPE_VOID:
2189 case MONO_TYPE_BOOLEAN:
2192 case MONO_TYPE_CHAR:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2229 if (arg->type != STACK_R8)
2232 case MONO_TYPE_VALUETYPE:
2233 if (arg->type != STACK_VTYPE)
2235 klass = mono_class_from_mono_type (simple_type);
2236 if (klass != arg->klass)
2239 case MONO_TYPE_TYPEDBYREF:
2240 if (arg->type != STACK_VTYPE)
2242 klass = mono_class_from_mono_type (simple_type);
2243 if (klass != arg->klass)
2246 case MONO_TYPE_GENERICINST:
2247 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2248 if (arg->type != STACK_VTYPE)
2250 klass = mono_class_from_mono_type (simple_type);
2251 if (klass != arg->klass)
2255 if (arg->type != STACK_OBJ)
2257 /* FIXME: check type compatibility */
2261 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing. */
2262 g_assert (cfg->generic_sharing_context);
2263 if (mini_type_var_is_vt (cfg, simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2267 if (arg->type != STACK_OBJ)
2272 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 * Validate that the stack types of args[] match the signature; returns
 * non-zero on mismatch. 'return 1' / 'continue' lines between the visible
 * checks are elided in this extraction.
 */
2278 * Prepare arguments for passing to a function call.
2279 * Return a non-zero value if the arguments can't be passed to the given
2281 * The type checks are not yet complete and some conversions may need
2282 * casts on 32 or 64 bit architectures.
2284 * FIXME: implement this using target_type_is_incompatible ()
2287 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2289 MonoType *simple_type;
/* 'this' argument must be an object, managed pointer, or native pointer. */
2293 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2297 for (i = 0; i < sig->param_count; ++i) {
2298 if (sig->params [i]->byref) {
2299 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2303 simple_type = sig->params [i];
2304 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2306 switch (simple_type->type) {
2307 case MONO_TYPE_VOID:
2312 case MONO_TYPE_BOOLEAN:
2315 case MONO_TYPE_CHAR:
2318 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2324 case MONO_TYPE_FNPTR:
2325 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2328 case MONO_TYPE_CLASS:
2329 case MONO_TYPE_STRING:
2330 case MONO_TYPE_OBJECT:
2331 case MONO_TYPE_SZARRAY:
2332 case MONO_TYPE_ARRAY:
2333 if (args [i]->type != STACK_OBJ)
2338 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != STACK_R8)
2346 case MONO_TYPE_VALUETYPE:
2347 if (simple_type->data.klass->enumtype) {
2348 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2351 if (args [i]->type != STACK_VTYPE)
2354 case MONO_TYPE_TYPEDBYREF:
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_GENERICINST:
2359 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2362 case MONO_TYPE_MVAR:
2364 if (args [i]->type != STACK_VTYPE)
2368 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *CALL_MEMBASE (virtual) opcode to its direct-call counterpart.
 * NOTE(review): the 'return OP_*CALL;' lines between the case labels are
 * elided in this extraction.
 */
2376 callvirt_to_call (int opcode)
2379 case OP_CALL_MEMBASE:
2381 case OP_VOIDCALL_MEMBASE:
2383 case OP_FCALL_MEMBASE:
2385 case OP_VCALL_MEMBASE:
2387 case OP_LCALL_MEMBASE:
2390 g_assert_not_reached ();
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either an existing imt_arg value or a
 * method constant) into a register and attach it to the call — via
 * imt_arg_reg for LLVM, MONO_ARCH_IMT_REG where the arch defines one,
 * otherwise the arch-specific hook.
 */
2396 /* Either METHOD or IMT_ARG needs to be set */
2398 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2402 if (COMPILE_LLVM (cfg)) {
2403 method_reg = alloc_preg (cfg);
2406 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2407 } else if (cfg->compile_aot) {
2408 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2411 MONO_INST_NEW (cfg, ins, OP_PCONST);
2412 ins->inst_p0 = method;
2413 ins->dreg = method_reg;
2414 MONO_ADD_INS (cfg->cbb, ins);
2418 call->imt_arg_reg = method_reg;
2420 #ifdef MONO_ARCH_IMT_REG
2421 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2423 /* Need this to keep the IMT arg alive */
2424 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2429 #ifdef MONO_ARCH_IMT_REG
2430 method_reg = alloc_preg (cfg);
2433 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2434 } else if (cfg->compile_aot) {
2435 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2438 MONO_INST_NEW (cfg, ins, OP_PCONST);
2439 ins->inst_p0 = method;
2440 ins->dreg = method_reg;
2441 MONO_ADD_INS (cfg->cbb, ins);
2444 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2446 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Mempool-allocate a MonoJumpInfo patch record.
 * NOTE(review): the assignments of ip and type are elided in this view.
 */
2450 static MonoJumpInfo *
2451 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2453 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2457 ji->data.target = target;
/* Context-used bits for a class, but only when generic sharing is active. */
2463 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2465 if (cfg->generic_sharing_context)
2466 return mono_class_check_context_used (klass);
/* Context-used bits for a method, but only when generic sharing is active. */
2472 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2474 if (cfg->generic_sharing_context)
2475 return mono_method_check_context_used (method);
/*
 * Decide what extra runtime context a call to cmethod needs: a vtable
 * (static/valuetype methods on generic classes that may be shared) or a
 * method runtime generic context (methods with their own method_inst).
 * Results are returned through the optional out parameters.
 */
2481 * check_method_sharing:
2483 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2486 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2488 gboolean pass_vtable = FALSE;
2489 gboolean pass_mrgctx = FALSE;
2491 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2492 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2493 gboolean sharable = FALSE;
2495 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2498 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2499 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2500 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2502 sharable = sharing_enabled && context_sharable;
2506 * Pass vtable iff target method might
2507 * be shared, which means that sharing
2508 * is enabled for its class and its
2509 * context is sharable (and it's not a
2512 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2516 if (mini_method_get_context (cmethod) &&
2517 mini_method_get_context (cmethod)->method_inst) {
/* Generic methods get an mrgctx instead of a vtable — never both. */
2518 g_assert (!pass_vtable);
2520 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2523 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2524 MonoGenericContext *context = mini_method_get_context (cmethod);
2525 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2527 if (sharing_enabled && context_sharable)
2529 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2534 if (out_pass_vtable)
2535 *out_pass_vtable = pass_vtable;
2536 if (out_pass_mrgctx)
2537 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the flavour of call.
 * Handles valuetype returns (via vret_var or OP_OUTARG_VTRETADDR), the
 * soft-float r8->r4 argument conversion, and dispatches argument lowering to
 * mono_arch_emit_call ()/mono_llvm_emit_call ().  Returns the call
 * instruction; the caller is responsible for adding it to a bblock.
 */
2540 inline static MonoCallInst *
2541 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2542 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2546 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the leave-instrumentation first. */
2551 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2553 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2555 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2558 call->signature = sig;
2559 call->rgctx_reg = rgctx;
2560 sig_ret = mini_replace_type (sig->ret);
2562 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: reuse cfg->vret_addr when available, ... */
2565 if (mini_type_is_vtype (cfg, sig_ret)) {
2566 call->vret_var = cfg->vret_addr;
2567 //g_assert_not_reached ();
/* ... otherwise allocate a temp and reference it through OP_OUTARG_VTRETADDR. */
2569 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2570 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2573 temp->backend.is_pinvoke = sig->pinvoke;
2576 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2577 * address of return value to increase optimization opportunities.
2578 * Before vtype decomposition, the dreg of the call ins itself represents the
2579 * fact the call modifies the return value. After decomposition, the call will
2580 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2581 * will be transformed into an LDADDR.
2583 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2584 loada->dreg = alloc_preg (cfg);
2585 loada->inst_p0 = temp;
2586 /* We reference the call too since call->dreg could change during optimization */
2587 loada->inst_p1 = call;
2588 MONO_ADD_INS (cfg->cbb, loada);
2590 call->inst.dreg = temp->dreg;
2592 call->vret_var = loada;
2593 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2594 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2596 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2597 if (COMPILE_SOFT_FLOAT (cfg)) {
2599 * If the call has a float argument, we would need to do an r8->r4 conversion using
2600 * an icall, but that cannot be done during the call sequence since it would clobber
2601 * the call registers + the stack. So we do it before emitting the call.
2603 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2605 MonoInst *in = call->args [i];
/* The implicit 'this' argument (index < hasthis) is treated as a native int. */
2607 if (i >= sig->hasthis)
2608 t = sig->params [i - sig->hasthis];
2610 t = &mono_defaults.int_class->byval_arg;
2611 t = mono_type_get_underlying_type (t);
2613 if (!t->byref && t->type == MONO_TYPE_R4) {
2614 MonoInst *iargs [1];
2618 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2620 /* The result will be in an int vreg */
2621 call->args [i] = conv;
2627 call->need_unbox_trampoline = unbox_trampoline;
2630 if (COMPILE_LLVM (cfg))
2631 mono_llvm_emit_call (cfg, call);
2633 mono_arch_emit_call (cfg, call);
2635 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area and the fact that this method makes calls. */
2638 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2639 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument held in RGCTX_REG to CALL: on architectures
 * with a dedicated MONO_ARCH_RGCTX_REG it is passed in that register,
 * otherwise its vreg is recorded in call->rgctx_arg_reg.
 */
2645 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2647 #ifdef MONO_ARCH_RGCTX_REG
2648 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2649 cfg->uses_rgctx_reg = TRUE;
2650 call->rgctx_reg = TRUE;
2652 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG, when non-NULL, are passed as the hidden imt/rgctx
 * arguments.  For pinvoke wrappers with check_pinvoke_callconv enabled it
 * also saves and re-checks the stack pointer around the call, throwing
 * ExecutionEngineException on a calling-convention mismatch.
 */
2659 inline static MonoInst*
2660 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2665 gboolean check_sp = FALSE;
2667 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2668 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2670 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx arg to a fresh vreg so the original is not clobbered by the call. */
2675 rgctx_reg = mono_alloc_preg (cfg);
2676 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2680 if (!cfg->stack_inbalance_var)
2681 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record the stack pointer before the call. */
2683 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2684 ins->dreg = cfg->stack_inbalance_var->dreg;
2685 MONO_ADD_INS (cfg->cbb, ins);
2688 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2690 call->inst.sreg1 = addr->dreg;
2693 emit_imt_argument (cfg, call, NULL, imt_arg);
2695 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Re-read the stack pointer after the call and compare with the saved value. */
2700 sp_reg = mono_alloc_preg (cfg);
2702 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2704 MONO_ADD_INS (cfg->cbb, ins);
2706 /* Restore the stack so we don't crash when throwing the exception */
2707 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2708 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2709 MONO_ADD_INS (cfg->cbb, ins);
2711 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2712 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2716 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2718 return (MonoInst*)call;
2722 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2725 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2727 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual, possibly tail) call to METHOD with signature
 * SIG and arguments ARGS.  THIS being non-NULL makes the call virtual;
 * IMT_ARG/RGCTX_ARG, when set, are passed as hidden arguments.  Handles
 * remoting (transparent-proxy) dispatch, string ctor signature fixup,
 * delegate Invoke fast paths, devirtualization of final/non-virtual
 * methods, IMT-based interface dispatch and plain vtable dispatch.
 * Returns the call instruction.
 */
2730 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2731 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2733 #ifndef DISABLE_REMOTING
2734 gboolean might_be_remote = FALSE;
2736 gboolean virtual = this != NULL;
2737 gboolean enable_for_aot = TRUE;
2741 gboolean need_unbox_trampoline;
2744 sig = mono_method_signature (method);
/* Copy the rgctx argument out of harm's way before emitting the call. */
2747 rgctx_reg = mono_alloc_preg (cfg);
2748 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2751 if (method->string_ctor) {
2752 /* Create the real signature */
2753 /* FIXME: Cache these */
2754 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2755 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2760 context_used = mini_method_check_context_used (cfg, method);
2762 #ifndef DISABLE_REMOTING
/* 'this' might be a transparent proxy: MarshalByRef (or object) non-virtual calls. */
2763 might_be_remote = this && sig->hasthis &&
2764 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2765 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2767 if (might_be_remote && context_used) {
2770 g_assert (cfg->generic_sharing_context);
2772 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2774 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Calls to object/interface methods on valuetypes may need an unbox trampoline. */
2778 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2780 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2782 #ifndef DISABLE_REMOTING
2783 if (might_be_remote)
2784 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2787 call->method = method;
2788 call->inst.flags |= MONO_INST_HAS_METHOD;
2789 call->inst.inst_left = this;
2790 call->tail_call = tail;
2793 int vtable_reg, slot_reg, this_reg;
2796 this_reg = this->dreg;
/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2798 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2799 MonoInst *dummy_use;
2801 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2803 /* Make a call to delegate->invoke_impl */
2804 call->inst.inst_basereg = this_reg;
2805 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2806 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2808 /* We must emit a dummy use here because the delegate trampoline will
2809 replace the 'this' argument with the delegate target making this activation
2810 no longer a root for the delegate.
2811 This is an issue for delegates that target collectible code such as dynamic
2812 methods of GC'able assemblies.
2814 For a test case look into #667921.
2816 FIXME: a dummy use is not the best way to do it as the local register allocator
2817 will put it on a caller save register and spil it around the call.
2818 Ideally, we would either put it on a callee save register or only do the store part.
2820 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2822 return (MonoInst*)call;
/* Devirtualize: non-virtual or final methods only need a null check. */
2825 if ((!cfg->compile_aot || enable_for_aot) &&
2826 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2827 (MONO_METHOD_IS_FINAL (method) &&
2828 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2829 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2831 * the method is not virtual, we just need to ensure this is not null
2832 * and then we can call the method directly.
2834 #ifndef DISABLE_REMOTING
2835 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2837 * The check above ensures method is not gshared, this is needed since
2838 * gshared methods can't have wrappers.
2840 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2844 if (!method->string_ctor)
2845 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2847 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2848 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2850 * the method is virtual, but we can statically dispatch since either
2851 * it's class or the method itself are sealed.
2852 * But first we need to ensure it's not a null reference.
2854 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2856 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on NULL 'this'). */
2858 vtable_reg = alloc_preg (cfg);
2859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2860 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: dispatch through the IMT slot below the vtable. */
2863 guint32 imt_slot = mono_method_get_imt_slot (method);
2864 emit_imt_argument (cfg, call, call->method, imt_arg);
2865 slot_reg = vtable_reg;
2866 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2868 if (slot_reg == -1) {
2869 slot_reg = alloc_preg (cfg);
2870 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2871 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class call: dispatch through the regular vtable slot. */
2874 slot_reg = vtable_reg;
2875 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2876 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2878 g_assert (mono_method_signature (method)->generic_param_count);
2879 emit_imt_argument (cfg, call, call->method, imt_arg);
2883 call->inst.sreg1 = slot_reg;
2884 call->inst.inst_offset = offset;
2885 call->virtual = TRUE;
2889 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2892 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2894 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a non-tail call to METHOD with its own
 * signature and no imt/rgctx arguments.
 */
2898 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2900 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * The call is non-virtual, non-tail and carries no rgctx argument.
 */
2904 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2911 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2914 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2916 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its function address FUNC.
 * Looks up the registered icall info and calls through its wrapper.
 */
2920 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2922 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2926 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2930 * mono_emit_abs_call:
2932 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE/DATA.
 * A MonoJumpInfo is created, registered in cfg->abs_patches and used as
 * the call address; the ABS patch resolver turns it into the real target.
 */
2934 inline static MonoInst*
2935 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2936 MonoMethodSignature *sig, MonoInst **args)
2938 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2942 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2945 if (cfg->abs_patches == NULL)
2946 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2947 g_hash_table_insert (cfg->abs_patches, ji, ji);
2948 ins = mono_emit_native_call (cfg, ji, sig, args);
2949 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee may
 * return a small integer without defined upper bits (pinvoke, or LLVM
 * compiled code).  Returns the (possibly new) result instruction.
 */
2954 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2956 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2957 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2961 * Native code might return non register sized integers
2962 * without initializing the upper bits.
/* Pick a sign/zero-extension matching the return type's load width. */
2964 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2965 case OP_LOADI1_MEMBASE:
2966 widen_op = OP_ICONV_TO_I1;
2968 case OP_LOADU1_MEMBASE:
2969 widen_op = OP_ICONV_TO_U1;
2971 case OP_LOADI2_MEMBASE:
2972 widen_op = OP_ICONV_TO_I2;
2974 case OP_LOADU2_MEMBASE:
2975 widen_op = OP_ICONV_TO_U2;
2981 if (widen_op != -1) {
2982 int dreg = alloc_preg (cfg);
2985 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2986 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the managed String.memcpy helper used for
 * valuetype copies.  Aborts with g_error if the corlib lacks it.
 */
2996 get_memcpy_method (void)
2998 static MonoMethod *memcpy_method = NULL;
2999 if (!memcpy_method) {
3000 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3002 g_error ("Old corlib found. Install a new one");
3004 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build in *WB_BITMAP a bitmask with one bit per pointer-sized
 * word of KLASS (starting at byte OFFSET) that holds a managed reference and
 * therefore needs a write barrier.  Static fields are skipped; nested
 * valuetypes with references are descended into.
 */
3008 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3010 MonoClassField *field;
3011 gpointer iter = NULL;
3013 while ((field = mono_class_get_fields (klass, &iter))) {
3016 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) object header; strip it. */
3018 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3019 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3020 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3021 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3023 MonoClass *field_class = mono_class_from_mono_type (field->type);
3024 if (field_class->has_references)
3025 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.  Prefers,
 * in order: the arch-specific OP_CARD_TABLE_WBARRIER opcode, inline
 * card-table marking, and finally a call to the GC's write-barrier method.
 * No-op when cfg->gen_write_barriers is off.
 */
3031 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3033 int card_table_shift_bits;
3034 gpointer card_table_mask;
3036 MonoInst *dummy_use;
3037 int nursery_shift_bits;
3038 size_t nursery_size;
3039 gboolean has_card_table_wb = FALSE;
3041 if (!cfg->gen_write_barriers)
3044 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3046 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3048 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3049 has_card_table_wb = TRUE;
3052 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3055 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3056 wbarrier->sreg1 = ptr->dreg;
3057 wbarrier->sreg2 = value->dreg;
3058 MONO_ADD_INS (cfg->cbb, wbarrier);
3059 } else if (card_table) {
/* Inline card marking: card = card_table + ((ptr >> shift) & mask); *card = 1. */
3060 int offset_reg = alloc_preg (cfg);
3061 int card_reg = alloc_preg (cfg);
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3065 if (card_table_mask)
3066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3068 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3069 * IMM's larger than 32bits.
3071 if (cfg->compile_aot) {
3072 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3074 MONO_INST_NEW (cfg, ins, OP_PCONST);
3075 ins->inst_p0 = card_table;
3076 ins->dreg = card_reg;
3077 MONO_ADD_INS (cfg->cbb, ins);
3080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3081 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write-barrier method. */
3083 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3084 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
3087 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an unrolled, write-barrier-aware copy of SIZE bytes of a KLASS
 * valuetype from IARGS[1] to IARGS[0].  Returns early (falling back to a
 * mono_gc_wbarrier_value_copy_bitmap icall, or to the caller) when the
 * alignment is too small, the type too large, or the unroll limit exceeded.
 */
3091 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3093 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3094 unsigned need_wb = 0;
3099 /*types with references can't have alignment smaller than sizeof(void*) */
3100 if (align < SIZEOF_VOID_P)
3103 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3104 if (size > 32 * SIZEOF_VOID_P)
3107 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3109 /* We don't unroll more than 5 stores to avoid code bloat. */
3110 if (size > 5 * SIZEOF_VOID_P) {
3111 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3112 size += (SIZEOF_VOID_P - 1);
3113 size &= ~(SIZEOF_VOID_P - 1);
3115 EMIT_NEW_ICONST (cfg, iargs [2], size);
3116 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3117 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3121 destreg = iargs [0]->dreg;
3122 srcreg = iargs [1]->dreg;
3125 dest_ptr_reg = alloc_preg (cfg);
3126 tmp_reg = alloc_preg (cfg);
3129 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy a pointer-sized word at a time, emitting a barrier where needed. */
3131 while (size >= SIZEOF_VOID_P) {
3132 MonoInst *load_inst;
3133 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3134 load_inst->dreg = tmp_reg;
3135 load_inst->inst_basereg = srcreg;
3136 load_inst->inst_offset = offset;
3137 MONO_ADD_INS (cfg->cbb, load_inst);
3139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3142 emit_write_barrier (cfg, iargs [0], load_inst);
3144 offset += SIZEOF_VOID_P;
3145 size -= SIZEOF_VOID_P;
3148 /*tmp += sizeof (void*)*/
3149 if (size >= SIZEOF_VOID_P) {
3150 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3151 MONO_ADD_INS (cfg->cbb, iargs [0]);
3155 /* Those cannot be references since size < sizeof (void*) */
3157 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3158 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3164 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3165 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3171 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3172 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3181 * Emit code to copy a valuetype of type @klass whose address is stored in
3182 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of type KLASS from *SRC->dreg to
 * *DEST->dreg.  NATIVE selects the native (marshalled) layout/size.
 * Chooses between an inline memcpy, a wb-aware unrolled copy, the
 * mono_value_copy / mono_gsharedvt_value_copy icalls, and a call to the
 * managed memcpy helper, depending on write barriers and gsharedvt.
 */
3185 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3187 MonoInst *iargs [4];
3188 int context_used, n;
3190 MonoMethod *memcpy_method;
3191 MonoInst *size_ins = NULL;
3192 MonoInst *memcpy_ins = NULL;
3196 * This check breaks with spilled vars... need to handle it during verification anyway.
3197 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime via the rgctx. */
3200 if (mini_is_gsharedvt_klass (cfg, klass)) {
3202 context_used = mini_class_check_context_used (cfg, klass);
3203 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3204 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3208 n = mono_class_native_size (klass, &align);
3210 n = mono_class_value_size (klass, &align);
3212 /* if native is true there should be no references in the struct */
3213 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3214 /* Avoid barriers when storing to the stack */
3215 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3216 (dest->opcode == OP_LDADDR))) {
3222 context_used = mini_class_check_context_used (cfg, klass);
3224 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3225 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3227 } else if (context_used) {
3228 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3230 if (cfg->compile_aot) {
3231 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3233 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3234 mono_class_compute_gc_descriptor (klass);
3239 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3241 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: small copies are emitted inline. */
3246 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3247 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3248 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3253 iargs [2] = size_ins;
3255 EMIT_NEW_ICONST (cfg, iargs [2], n);
3257 memcpy_method = get_memcpy_method ();
3259 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3261 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the managed String.memset helper used for
 * zero-initializing valuetypes.  Aborts with g_error if the corlib lacks it.
 */
3266 get_memset_method (void)
3268 static MonoMethod *memset_method = NULL;
3269 if (!memset_method) {
3270 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3272 g_error ("Old corlib found. Install a new one");
3274 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype of type KLASS at DEST->dreg.
 * Under gsharedvt the runtime size/bzero routine come from the rgctx;
 * otherwise small types are cleared inline and large ones through the
 * managed memset helper.
 */
3278 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3280 MonoInst *iargs [3];
3281 int n, context_used;
3283 MonoMethod *memset_method;
3284 MonoInst *size_ins = NULL;
3285 MonoInst *bzero_ins = NULL;
3286 static MonoMethod *bzero_method;
3288 /* FIXME: Optimize this for the case when dest is an LDADDR */
3290 mono_class_init (klass);
/* gsharedvt: size and bzero routine are only known at runtime. */
3291 if (mini_is_gsharedvt_klass (cfg, klass)) {
3292 context_used = mini_class_check_context_used (cfg, klass);
3293 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3294 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3296 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3297 g_assert (bzero_method);
3299 iargs [1] = size_ins;
3300 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3304 n = mono_class_value_size (klass, &align);
/* Small types are cleared inline. */
3306 if (n <= sizeof (gpointer) * 5) {
3307 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3310 memset_method = get_memset_method ();
3312 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3313 EMIT_NEW_ICONST (cfg, iargs [2], n);
3314 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for the current method.
 * Depending on CONTEXT_USED and METHOD's kind, the context comes from the
 * mrgctx variable, the vtable variable, or the vtable of 'this'.
 */
3319 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3321 MonoInst *this = NULL;
3323 g_assert (cfg->generic_sharing_context);
/* Non-static reference-type methods can reach the context through 'this'. */
3325 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3326 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3327 !method->klass->valuetype)
3328 EMIT_NEW_ARGLOAD (cfg, this, 0);
3330 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3331 MonoInst *mrgctx_loc, *mrgctx_var;
3334 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3336 mrgctx_loc = mono_get_vtable_var (cfg);
3337 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3340 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3341 MonoInst *vtable_loc, *vtable_var;
3345 vtable_loc = mono_get_vtable_var (cfg);
3346 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* A generic method stores an mrgctx there; the vtable is a field of it. */
3348 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3349 MonoInst *mrgctx_var = vtable_var;
3352 vtable_reg = alloc_preg (cfg);
3353 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3354 vtable_var->type = STACK_PTR;
/* Otherwise load the vtable out of 'this'. */
3362 vtable_reg = alloc_preg (cfg);
3363 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate and fill a MonoJumpInfoRgctxEntry describing an rgctx slot:
 * the requesting METHOD, whether the slot lives in an mrgctx, the embedded
 * patch (PATCH_TYPE/PATCH_DATA) and the requested INFO_TYPE.
 */
3368 static MonoJumpInfoRgctxEntry *
3369 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3371 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3372 res->method = method;
3373 res->in_mrgctx = in_mrgctx;
3374 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3375 res->data->type = patch_type;
3376 res->data->data.target = patch_data;
3377 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline to resolve ENTRY,
 * passing RGCTX as the single argument.
 */
3382 static inline MonoInst*
3383 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3385 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR loading the RGCTX_TYPE property of KLASS from the runtime
 * generic context of the current method.
 */
3389 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3390 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3392 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3393 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3395 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR loading the RGCTX_TYPE property of signature SIG from the
 * runtime generic context of the current method.
 */
3399 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3400 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3402 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3403 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3405 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR loading the RGCTX_TYPE property of a gsharedvt call described
 * by SIG + CMETHOD from the runtime generic context of the current method.
 */
3409 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3410 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3412 MonoJumpInfoGSharedVtCall *call_info;
3413 MonoJumpInfoRgctxEntry *entry;
3416 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3417 call_info->sig = sig;
3418 call_info->method = cmethod;
3420 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3421 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3423 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR loading the gsharedvt info of CMETHOD (described by INFO) from
 * the runtime generic context of the current method.
 */
3428 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3429 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3431 MonoJumpInfoRgctxEntry *entry;
3434 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3435 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3437 return emit_rgctx_fetch (cfg, rgctx, entry);
3441 * emit_get_rgctx_method:
3443 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3444 * normal constants, else emit a load from the rgctx.
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR loading the RGCTX_TYPE property of CMETHOD.  When CONTEXT_USED
 * is 0 the value is a compile-time constant; otherwise it is fetched from
 * the runtime generic context.
 */
3447 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3448 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3450 if (!context_used) {
3453 switch (rgctx_type) {
3454 case MONO_RGCTX_INFO_METHOD:
3455 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3457 case MONO_RGCTX_INFO_METHOD_RGCTX:
3458 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3461 g_assert_not_reached ();
3464 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3465 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3467 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR loading the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
3472 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3473 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3475 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3476 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3478 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt-info entry (RGCTX_TYPE, DATA) for the
 * current method, reusing an existing entry when possible (except for
 * LOCAL_OFFSET entries, which are never shared) and growing the entry array
 * on demand.
 */
3482 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3484 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3485 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing matching entry first. */
3490 for (i = 0; i < info->num_entries; ++i) {
3491 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3493 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). */
3497 if (info->num_entries == info->count_entries) {
3498 MonoRuntimeGenericContextInfoTemplate *new_entries;
3499 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3501 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3503 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3504 info->entries = new_entries;
3505 info->count_entries = new_count_entries;
3508 idx = info->num_entries;
3509 template = &info->entries [idx];
3510 template->info_type = rgctx_type;
3511 template->data = data;
3513 info->num_entries ++;
3519 * emit_get_gsharedvt_info:
3521 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * emit_get_gsharedvt_info:
 *
 *   Like the emit_get_rgctx_* helpers, but loads the value from the method's
 * gsharedvt info variable (entries [idx]) instead of calling an rgctx fetch
 * trampoline.
 */
3524 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3529 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3530 /* Load info->entries [idx] */
3531 dreg = alloc_preg (cfg);
3532 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper keying the gsharedvt info lookup on a class
 * (via its byval_arg type).
 */
3538 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3540 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3544 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS, passing its
 * vtable (loaded from the rgctx under generic sharing, or as a constant).
 * On return the caller must check KLASS for load errors.
 */
3547 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3549 MonoInst *vtable_arg;
3553 context_used = mini_class_check_context_used (cfg, klass);
3556 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3557 klass, MONO_RGCTX_INFO_VTABLE);
3559 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3563 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3566 if (COMPILE_LLVM (cfg))
3567 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3569 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3570 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated register when the arch has one. */
3571 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3572 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a sequence-point instruction at IL offset IP for debugger support,
 * but only when seq points are enabled and METHOD is the outermost method
 * being compiled (not an inlinee).
 */
3579 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3583 if (cfg->gen_seq_points && cfg->method == method) {
3584 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3586 ins->flags |= MONO_INST_NONEMPTY_STACK;
3587 MONO_ADD_INS (cfg->cbb, ins);
/* When --debug=casts is enabled, record the from/to classes of an upcoming cast
 * in the JIT TLS data so a failing cast can produce a detailed message.
 * obj_reg holds the object; when null_check is requested the recording is
 * skipped for null objects via is_null_bb. out_bblock (may be NULL) receives
 * the current bblock since this can start new blocks. */
3592 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3594 if (mini_get_debug_options ()->better_cast_details) {
3595 int vtable_reg = alloc_preg (cfg);
3596 int klass_reg = alloc_preg (cfg);
3597 MonoBasicBlock *is_null_bb = NULL;
3599 int to_klass_reg, context_used;
3602 NEW_BBLOCK (cfg, is_null_bb);
3604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3608 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Fixed: dropped the stray '.' after the newline in the error message. */
3610 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3614 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = object->vtable->klass */
3615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3618 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3620 context_used = mini_class_check_context_used (cfg, klass);
3622 MonoInst *class_ins;
/* Shared code: target class comes from the runtime generic context. */
3624 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3625 to_klass_reg = class_ins->dreg;
3627 to_klass_reg = alloc_preg (cfg);
3628 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3633 MONO_START_BB (cfg, is_null_bb);
3635 *out_bblock = cfg->cbb;
/* Undo save_cast_details once the cast has succeeded. */
3641 reset_cast_details (MonoCompile *cfg)
3643 /* Reset the variables holding the cast details */
3644 if (mini_get_debug_options ()->better_cast_details) {
3645 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3647 MONO_ADD_INS (cfg->cbb, tls_get);
3648 /* It is enough to reset the from field */
3649 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3654 * On return the caller must check @array_class for load errors
/* Emit a runtime check that OBJ's exact class is ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stelem covariance checks). */
3657 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3659 int vtable_reg = alloc_preg (cfg);
3662 context_used = mini_class_check_context_used (cfg, array_class);
3664 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj. */
3666 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code compares MonoClass pointers; otherwise compare vtables. */
3668 if (cfg->opt & MONO_OPT_SHARED) {
3669 int class_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3671 if (cfg->compile_aot) {
/* AOT cannot embed the class pointer; load it through a patchable constant. */
3672 int klass_reg = alloc_preg (cfg);
3673 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3678 } else if (context_used) {
3679 MonoInst *vtable_ins;
/* Generic-shared code: fetch the expected vtable from the RGCTX. */
3681 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3682 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3684 if (cfg->compile_aot) {
3688 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3690 vt_reg = alloc_preg (cfg);
3691 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3695 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3701 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3703 reset_cast_details (cfg);
3707 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3708 * generic code is generated.
/* Lowers 'unbox Nullable<T>' to a call to Nullable<T>.Unbox (obj). */
3711 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3713 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3716 MonoInst *rgctx, *addr;
3718 /* FIXME: What if the class is shared? We might not
3719 have to get the address of the method from the
/* Shared path: indirect call through an RGCTX-fetched code address. */
3721 addr = emit_get_rgctx_method (cfg, context_used, method,
3722 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3724 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3726 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, possibly passing the vtable as hidden arg. */
3728 gboolean pass_vtable, pass_mrgctx;
3729 MonoInst *rgctx_arg = NULL;
3731 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3732 g_assert (!pass_mrgctx);
3735 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3738 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3741 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Lowers the 'unbox' opcode: checks that sp [0] is a boxed instance of KLASS
 * (InvalidCastException otherwise) and returns the address of the value,
 * i.e. obj + sizeof (MonoObject). */
3746 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3750 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3751 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3752 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3753 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3755 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3756 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3757 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3759 /* FIXME: generics */
3760 g_assert (klass->rank == 0);
/* The object must not be an array. */
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3764 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3770 MonoInst *element_class;
3772 /* This assertion is from the unboxcast insn */
3773 g_assert (klass->rank == 0);
/* Shared code: compare against the RGCTX-provided element class. */
3775 element_class = emit_get_rgctx_klass (cfg, context_used,
3776 klass->element_class, MONO_RGCTX_INFO_KLASS);
3778 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3781 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3782 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3783 reset_cast_details (cfg);
/* Result: pointer to the unboxed value just past the object header. */
3786 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3787 MONO_ADD_INS (cfg->cbb, add);
3788 add->type = STACK_MP;
/* Gsharedvt unbox: KLASS is only known at runtime, so branch on its box kind
 * (1 = reference type, 2 = nullable, otherwise plain vtype) and produce the
 * address of the value in addr_reg. out_cbb receives the final bblock. */
3795 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3797 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3798 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3802 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3808 args [1] = klass_inst;
/* Cast check (throws on mismatch) before unboxing. */
3811 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3813 NEW_BBLOCK (cfg, is_ref_bb);
3814 NEW_BBLOCK (cfg, is_nullable_bb);
3815 NEW_BBLOCK (cfg, end_bb);
/* Runtime box-type discriminator: 1 = ref, 2 = nullable. */
3816 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3821 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3823 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3824 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: value lives just past the object header. */
3828 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3829 MONO_ADD_INS (cfg->cbb, addr);
3831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3834 MONO_START_BB (cfg, is_ref_bb);
3836 /* Save the ref to a temporary */
3837 dreg = alloc_ireg (cfg);
3838 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3839 addr->dreg = addr_reg;
3840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3841 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3844 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built gsharedvt signature
 * since the concrete method cannot be constructed at JIT time. */
3847 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3848 MonoInst *unbox_call;
3849 MonoMethodSignature *unbox_sig;
3852 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3854 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3855 unbox_sig->ret = &klass->byval_arg;
3856 unbox_sig->param_count = 1;
3857 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3858 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3860 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3861 addr->dreg = addr_reg;
3864 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3867 MONO_START_BB (cfg, end_bb);
/* All paths join here with the value address in addr_reg. */
3870 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3872 *out_cbb = cfg->cbb;
3878 * Returns NULL and set the cfg exception on error.
/* Emit the allocation of an instance of KLASS (for_box = allocating for a box).
 * Chooses between managed allocators, domain-shared helpers, AOT-friendly
 * mscorlib helpers and plain jit icalls depending on cfg options. */
3881 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3883 MonoInst *iargs [2];
/* Generic-shared path: class/vtable comes from the RGCTX. */
3889 MonoInst *iargs [2];
3891 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3893 if (cfg->opt & MONO_OPT_SHARED)
3894 rgctx_info = MONO_RGCTX_INFO_KLASS;
3896 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3897 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3899 if (cfg->opt & MONO_OPT_SHARED) {
3900 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3902 alloc_ftn = mono_object_new;
3905 alloc_ftn = mono_object_new_specific;
3908 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3909 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3911 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path. */
3914 if (cfg->opt & MONO_OPT_SHARED) {
3915 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3916 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3918 alloc_ftn = mono_object_new;
3919 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3920 /* This happens often in argument checking code, eg. throw new FooException... */
3921 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3922 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3923 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3925 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3926 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3930 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3931 cfg->exception_ptr = klass;
3935 #ifndef MONO_CROSS_COMPILE
3936 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3939 if (managed_alloc) {
3940 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3941 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3943 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size (in pointer words) as first arg. */
3945 guint32 lw = vtable->klass->instance_size;
3946 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3947 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3948 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3951 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3955 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3959 * Returns NULL and set the cfg exception on error.
/* Lowers the 'box' opcode: wraps VAL (a value of type KLASS) into a heap
 * object. Handles Nullable<T>, gsharedvt (runtime-variable) classes and the
 * plain vtype case. out_cbb receives the final bblock. */
3962 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3964 MonoInst *alloc, *ins;
3966 *out_cbb = cfg->cbb;
/* Nullable<T>: boxing is done by Nullable<T>.Box (val). */
3968 if (mono_class_is_nullable (klass)) {
3969 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3972 /* FIXME: What if the class is shared? We might not
3973 have to get the method address from the RGCTX. */
3974 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3975 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3976 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3978 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3980 gboolean pass_vtable, pass_mrgctx;
3981 MonoInst *rgctx_arg = NULL;
3983 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3984 g_assert (!pass_mrgctx);
3987 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3990 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3993 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Gsharedvt: branch at runtime on the class's box kind (1 = ref, 2 = nullable). */
3997 if (mini_is_gsharedvt_klass (cfg, klass)) {
3998 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3999 MonoInst *res, *is_ref, *src_var, *addr;
4002 dreg = alloc_ireg (cfg);
4004 NEW_BBLOCK (cfg, is_ref_bb);
4005 NEW_BBLOCK (cfg, is_nullable_bb);
4006 NEW_BBLOCK (cfg, end_bb);
4007 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4009 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4012 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value past the object header. */
4015 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4018 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4019 ins->opcode = OP_STOREV_MEMBASE;
4021 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4022 res->type = STACK_OBJ;
4024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: boxing a ref is the identity, but val's vreg holds a vtype
 * so the value has to be reloaded through its address. */
4027 MONO_START_BB (cfg, is_ref_bb);
4028 addr_reg = alloc_ireg (cfg);
4030 /* val is a vtype, so has to load the value manually */
4031 src_var = get_vreg_to_inst (cfg, val->dreg);
4033 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4034 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4036 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4039 MONO_START_BB (cfg, is_nullable_bb);
4042 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4043 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4045 MonoMethodSignature *box_sig;
4048 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4049 * construct that method at JIT time, so have to do things by hand.
4051 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4052 box_sig->ret = &mono_defaults.object_class->byval_arg;
4053 box_sig->param_count = 1;
4054 box_sig->params [0] = &klass->byval_arg;
4055 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4056 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4057 res->type = STACK_OBJ;
4061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4063 MONO_START_BB (cfg, end_bb);
4065 *out_cbb = cfg->cbb;
/* Plain (non-shared, non-nullable) vtype box. */
4069 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4073 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Returns whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one variant (co/contravariant) type argument that is
 * a reference type — such casts need the cached/complex cast path. */
4080 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4083 MonoGenericContainer *container;
4084 MonoGenericInst *ginst;
4086 if (klass->generic_class) {
4087 container = klass->generic_class->container_class->generic_container;
4088 ginst = klass->generic_class->context.class_inst;
4089 } else if (klass->generic_container && context_used) {
4090 container = klass->generic_container;
4091 ginst = container->context.class_inst;
4096 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters. */
4098 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4100 type = ginst->type_argv [i];
4101 if (mini_type_is_reference (cfg, type))
/* True when an isinst/castclass against KLASS cannot be decided by a simple
 * class-hierarchy walk: interfaces, arrays, nullables, MarshalByRef classes,
 * sealed classes and generic type parameters all need the complex/cached path.
 * The argument is parenthesized at every use for macro hygiene. */
4107 #define is_complex_isinst(klass) (((klass)->flags & TYPE_ATTRIBUTE_INTERFACE) || (klass)->rank || mono_class_is_nullable ((klass)) || mono_class_is_marshalbyref ((klass)) || ((klass)->flags & TYPE_ATTRIBUTE_SEALED) || (klass)->byval_arg.type == MONO_TYPE_VAR || (klass)->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the castclass-with-cache wrapper (args: obj, klass, cache
 * slot), recording cast details around it for --debug=casts. */
4110 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4112 MonoMethod *mono_castclass;
4115 mono_castclass = mono_marshal_get_castclass_with_cache ();
4117 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4118 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4119 reset_cast_details (cfg);
4120 *out_bblock = cfg->cbb;
/* Non-shared variant: builds the (obj, klass, cache) argument triple itself.
 * The cache slot is a patched constant under AOT, a domain-allocated pointer
 * otherwise. */
4126 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4135 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4138 if (cfg->compile_aot) {
4139 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4140 cfg->castclass_cache_index ++;
4141 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4142 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4144 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4147 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4149 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4153 * Returns NULL and set the cfg exception on error.
/* Lowers the 'castclass' opcode: checks SRC is null or an instance of KLASS,
 * throwing InvalidCastException otherwise. Picks between the cached wrapper,
 * an inlined marshal wrapper and inline vtable/class comparisons. */
4156 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4158 MonoBasicBlock *is_null_bb;
4159 int obj_reg = src->dreg;
4160 int vtable_reg = alloc_preg (cfg);
4162 MonoInst *klass_inst = NULL, *res;
4163 MonoBasicBlock *bblock;
4167 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic args force the cached path even in non-shared code. */
4169 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4170 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4171 (*inline_costs) += 2;
/* Interfaces/MBR classes: inline the castclass marshal wrapper. */
4174 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4175 MonoMethod *mono_castclass;
4176 MonoInst *iargs [1];
4179 mono_castclass = mono_marshal_get_castclass (klass);
4182 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4183 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4184 iargs, ip, cfg->real_offset, TRUE, &bblock);
4185 reset_cast_details (cfg);
4186 CHECK_CFG_EXCEPTION;
4187 g_assert (costs > 0);
4189 cfg->real_offset += 5;
4191 (*inline_costs) += costs;
/* Shared code with a complex class: use the cast cache. */
4200 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4201 MonoInst *cache_ins;
4203 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4208 /* klass - it's the second element of the cache entry*/
4209 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4212 args [2] = cache_ins;
4214 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4217 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects always pass the cast. */
4220 NEW_BBLOCK (cfg, is_null_bb);
4222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4225 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4227 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4228 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4229 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4231 int klass_reg = alloc_preg (cfg);
4233 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: an exact vtable/class compare suffices. */
4235 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4236 /* the remoting code is broken, access the class for now */
4237 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4238 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4240 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4241 cfg->exception_ptr = klass;
4244 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4249 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4252 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4256 MONO_START_BB (cfg, is_null_bb);
4258 reset_cast_details (cfg);
4269 * Returns NULL and set the cfg exception on error.
/* Lowers the 'isinst' opcode: result is SRC if it is null or an instance of
 * KLASS, NULL otherwise. Complex classes go through the cached wrapper;
 * otherwise inline vtable/rank/element-class checks are emitted. */
4272 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4275 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4276 int obj_reg = src->dreg;
4277 int vtable_reg = alloc_preg (cfg);
4278 int res_reg = alloc_ireg_ref (cfg);
4279 MonoInst *klass_inst = NULL;
/* Complex classes: call the isinst-with-cache wrapper (obj, klass, cache). */
4284 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4285 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4286 MonoInst *cache_ins;
4288 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4293 /* klass - it's the second element of the cache entry*/
4294 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4297 args [2] = cache_ins;
4299 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4302 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4305 NEW_BBLOCK (cfg, is_null_bb);
4306 NEW_BBLOCK (cfg, false_bb);
4307 NEW_BBLOCK (cfg, end_bb);
4309 /* Do the assignment at the beginning, so the other assignment can be if converted */
4310 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4311 ins->type = STACK_OBJ;
/* null input -> result is the input (null). */
4314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4315 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4317 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4319 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4320 g_assert (!context_used);
4321 /* the is_null_bb target simply copies the input register to the output */
4322 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4324 int klass_reg = alloc_preg (cfg);
/* Array case: match rank first, then classify the element class. */
4327 int rank_reg = alloc_preg (cfg);
4328 int eclass_reg = alloc_preg (cfg);
4330 g_assert (!context_used);
4331 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum[]-style element classes; special-case the
 * object/Enum/ValueType element classes per array covariance rules. */
4336 if (klass->cast_class == mono_defaults.object_class) {
4337 int parent_reg = alloc_preg (cfg);
4338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4339 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4340 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4342 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4343 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4344 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4345 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4346 } else if (klass->cast_class == mono_defaults.enum_class) {
4347 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4349 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4350 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4352 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4353 /* Check that the object is a vector too */
4354 int bounds_reg = alloc_preg (cfg);
4355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4360 /* the is_null_bb target simply copies the input register to the output */
4361 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4363 } else if (mono_class_is_nullable (klass)) {
4364 g_assert (!context_used);
4365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4366 /* the is_null_bb target simply copies the input register to the output */
4367 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: an exact compare decides the whole test. */
4369 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4370 g_assert (!context_used);
4371 /* the remoting code is broken, access the class for now */
4372 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4373 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4375 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4376 cfg->exception_ptr = klass;
4379 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4388 /* the is_null_bb target simply copies the input register to the output */
4389 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL. */
4394 MONO_START_BB (cfg, false_bb);
4396 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4397 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4399 MONO_START_BB (cfg, is_null_bb);
4401 MONO_START_BB (cfg, end_bb);
/* Lowers the (remoting-aware) CISINST opcode — see the comment below for the
 * 0/1/2 result encoding. Proxy handling is compiled out under DISABLE_REMOTING. */
4407 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4409 /* This opcode takes as input an object reference and a class, and returns:
4410 0) if the object is an instance of the class,
4411 1) if the object is not instance of the class,
4412 2) if the object is a proxy whose type cannot be determined */
4415 #ifndef DISABLE_REMOTING
4416 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4418 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4420 int obj_reg = src->dreg;
4421 int dreg = alloc_ireg (cfg);
4423 #ifndef DISABLE_REMOTING
4424 int klass_reg = alloc_preg (cfg);
4427 NEW_BBLOCK (cfg, true_bb);
4428 NEW_BBLOCK (cfg, false_bb);
4429 NEW_BBLOCK (cfg, end_bb);
4430 #ifndef DISABLE_REMOTING
4431 NEW_BBLOCK (cfg, false2_bb);
4432 NEW_BBLOCK (cfg, no_proxy_bb);
/* null -> not an instance (result 1). */
4435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4438 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4439 #ifndef DISABLE_REMOTING
4440 NEW_BBLOCK (cfg, interface_fail_bb);
4443 tmp_reg = alloc_preg (cfg);
4444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4445 #ifndef DISABLE_REMOTING
/* Interface check failed: it may still be a transparent proxy. */
4446 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4447 MONO_START_BB (cfg, interface_fail_bb);
4448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4450 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info -> type cannot be determined (result 2). */
4452 tmp_reg = alloc_preg (cfg);
4453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4457 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4460 #ifndef DISABLE_REMOTING
/* Non-interface class: detect proxies and test against the proxied class. */
4461 tmp_reg = alloc_preg (cfg);
4462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4465 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4466 tmp_reg = alloc_preg (cfg);
4467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4470 tmp_reg = alloc_preg (cfg);
4471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4475 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4478 MONO_START_BB (cfg, no_proxy_bb);
4480 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4482 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the 0/1/2 result. */
4486 MONO_START_BB (cfg, false_bb);
4488 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4491 #ifndef DISABLE_REMOTING
4492 MONO_START_BB (cfg, false2_bb);
4494 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4498 MONO_START_BB (cfg, true_bb);
4500 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4502 MONO_START_BB (cfg, end_bb);
/* NOTE(review): the trailing OP_ICONST appears to anchor the result value
 * (dreg) for the caller — the lines binding it are elided here; confirm. */
4505 MONO_INST_NEW (cfg, ins, OP_ICONST);
4507 ins->type = STACK_I4;
/* Lowers the (remoting-aware) CCASTCLASS opcode — see the comment below for
 * the 0/1/throw result encoding. Proxy handling is compiled out under
 * DISABLE_REMOTING. */
4513 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4515 /* This opcode takes as input an object reference and a class, and returns:
4516 0) if the object is an instance of the class,
4517 1) if the object is a proxy whose type cannot be determined
4518 an InvalidCastException exception is thrown otherwhise*/
4521 #ifndef DISABLE_REMOTING
4522 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4524 MonoBasicBlock *ok_result_bb;
4526 int obj_reg = src->dreg;
4527 int dreg = alloc_ireg (cfg);
4528 int tmp_reg = alloc_preg (cfg);
4530 #ifndef DISABLE_REMOTING
4531 int klass_reg = alloc_preg (cfg);
4532 NEW_BBLOCK (cfg, end_bb);
4535 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes the cast (result 0). */
4537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4540 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4542 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4543 #ifndef DISABLE_REMOTING
4544 NEW_BBLOCK (cfg, interface_fail_bb);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy may still pass. */
4547 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4548 MONO_START_BB (cfg, interface_fail_bb);
4549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4551 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info -> InvalidCastException; with it -> result 1. */
4553 tmp_reg = alloc_preg (cfg);
4554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4556 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4558 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4562 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4566 #ifndef DISABLE_REMOTING
/* Non-interface class: detect proxies and cast against the proxied class. */
4567 NEW_BBLOCK (cfg, no_proxy_bb);
4569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4571 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4573 tmp_reg = alloc_preg (cfg);
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4577 tmp_reg = alloc_preg (cfg);
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4582 NEW_BBLOCK (cfg, fail_1_bb);
4584 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4586 MONO_START_BB (cfg, fail_1_bb);
4588 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4591 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing castclass throws here. */
4593 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4595 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4599 MONO_START_BB (cfg, ok_result_bb);
4601 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4603 #ifndef DISABLE_REMOTING
4604 MONO_START_BB (cfg, end_bb);
/* NOTE(review): the trailing OP_ICONST appears to anchor the result value
 * (dreg) for the caller — the lines binding it are elided here; confirm. */
4608 MONO_INST_NEW (cfg, ins, OP_ICONST);
4610 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline version of Enum.HasFlag for enum value ENUM_THIS (loaded by
 * address) and mask ENUM_FLAG: load, AND with the flag, compare against the
 * flag and set a boolean (I4) result: (value & flag) == flag.
 * Uses 32-bit ops when the underlying type fits in an ireg, 64-bit otherwise.
 */
4615 static G_GNUC_UNUSED MonoInst*
4616 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4618 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4619 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4620 gboolean is_i4 = TRUE;
4622 switch (enum_type->type) {
4625 #if SIZEOF_REGISTER == 8
4634 MonoInst *load, *and, *cmp, *ceq;
4635 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4636 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4637 int dest_reg = alloc_ireg (cfg);
4639 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4640 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4641 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4642 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4644 ceq->type = STACK_I4;
/* Decompose immediately so later passes see only low-level opcodes. */
4647 load = mono_decompose_opcode (cfg, load);
4648 and = mono_decompose_opcode (cfg, and);
4649 cmp = mono_decompose_opcode (cfg, cmp);
4650 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate of class KLASS and inline the work of
 * mono_delegate_ctor: store the target object, the MonoMethod, a per-domain
 * code slot (filled after METHOD is compiled), and the invoke trampoline.
 * CONTEXT_USED is nonzero under generic sharing; VIRTUAL selects the virtual
 * delegate trampoline.
 */
4658 * Returns NULL and set the cfg exception on error.
4660 static G_GNUC_UNUSED MonoInst*
4661 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4665 gpointer trampoline;
4666 MonoInst *obj, *method_ins, *tramp_ins;
4670 // FIXME reenable optimisation for virtual case
4675 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4678 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4682 obj = handle_alloc (cfg, klass, FALSE, 0);
4686 /* Inline the contents of mono_delegate_ctor */
4688 /* Set target field */
4689 /* Optimize away setting of NULL target */
4690 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4692 if (cfg->gen_write_barriers) {
4693 dreg = alloc_preg (cfg);
4694 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4695 emit_write_barrier (cfg, ptr, target);
4699 /* Set method field */
4700 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4704 * To avoid looking up the compiled code belonging to the target method
4705 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4706 * store it, and we fill it after the method has been compiled.
4708 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4709 MonoInst *code_slot_ins;
4712 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The code hash is shared domain state; lock around lookup/insert. */
4714 domain = mono_domain_get ();
4715 mono_domain_lock (domain);
4716 if (!domain_jit_info (domain)->method_code_hash)
4717 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4718 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4720 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4721 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4723 mono_domain_unlock (domain);
4725 if (cfg->compile_aot)
4726 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4728 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: the trampoline is resolved at load time via a patch-info constant. */
4733 if (cfg->compile_aot) {
4734 MonoDelegateClassMethodPair *del_tramp;
4736 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4737 del_tramp->klass = klass;
4738 del_tramp->method = context_used ? NULL : method;
4739 del_tramp->virtual = virtual;
4740 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4743 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4745 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4746 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4749 /* Set invoke_impl field */
4751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4753 dreg = alloc_preg (cfg);
4754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4757 dreg = alloc_preg (cfg);
4758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4762 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the vararg mono_array_new_va icall for a RANK-rank
 * array allocation; SP holds the dimension arguments.  Registering the icall
 * gives it an icall wrapper; varargs disables LLVM for this method.
 */
4768 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4770 MonoJitICallInfo *info;
4772 /* Need to register the icall so it gets an icall wrapper */
4773 info = mono_get_array_new_va_icall (rank);
4775 cfg->flags |= MONO_CFG_HAS_VARARGS;
4777 /* mono_array_new_va () needs a vararg calling convention */
4778 cfg->disable_llvm = TRUE;
4780 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4781 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block (no-op if there is no got_var or it was already
 * allocated), and add a dummy use in the exit block so liveness analysis
 * keeps the variable alive for the whole method.
 */
4785 mono_emit_load_got_addr (MonoCompile *cfg)
4787 MonoInst *getaddr, *dummy_use;
4789 if (!cfg->got_var || cfg->got_var_allocated)
4792 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4793 getaddr->cil_code = cfg->header->code;
4794 getaddr->dreg = cfg->got_var->dreg;
4796 /* Add it to the start of the first bblock */
4797 if (cfg->bb_entry->code) {
4798 getaddr->next = cfg->bb_entry->code;
4799 cfg->bb_entry->code = getaddr;
4802 MONO_ADD_INS (cfg->bb_entry, getaddr);
4804 cfg->got_var_allocated = TRUE;
4807 * Add a dummy use to keep the got_var alive, since real uses might
4808 * only be generated by the back ends.
4809 * Add it to end_bblock, so the variable's lifetime covers the whole
4811 * It would be better to make the usage of the got var explicit in all
4812 * cases when the backend needs it (i.e. calls, throw etc.), so this
4813 * wouldn't be needed.
4815 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4816 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit (from MONO_INLINELIMIT env var or INLINE_LENGTH_LIMIT). */
4819 static int inline_limit;
4820 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects: disabled/shared/gsharing contexts, deep inline nesting,
 * noinline/synchronized/marshalbyref methods, bodies over the size limit
 * (unless AggressiveInlining), classes whose cctor cannot be run now,
 * declarative security, soft-float R4 signatures, and methods on the
 * dont_inline list.
 * NOTE(review): the env var is read lazily without locking — assumes first
 * call happens before any concurrent compilation; confirm against callers.
 */
4823 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4825 MonoMethodHeaderSummary header;
4827 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4828 MonoMethodSignature *sig = mono_method_signature (method);
4832 if (cfg->disable_inline)
4834 if (cfg->generic_sharing_context)
4837 if (cfg->inline_depth > 10)
4840 #ifdef MONO_ARCH_HAVE_LMF_OPS
4841 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4842 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4843 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4848 if (!mono_method_get_header_summary (method, &header))
4851 /*runtime, icall and pinvoke are checked by summary call*/
4852 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4853 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4854 (mono_class_is_marshalbyref (method->klass)) ||
4858 /* also consider num_locals? */
4859 /* Do the size check early to avoid creating vtables */
4860 if (!inline_limit_inited) {
4861 if (g_getenv ("MONO_INLINELIMIT"))
4862 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4864 inline_limit = INLINE_LENGTH_LIMIT;
4865 inline_limit_inited = TRUE;
4867 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4871 * if we can initialize the class of the method right away, we do,
4872 * otherwise we don't allow inlining if the class needs initialization,
4873 * since it would mean inserting a call to mono_runtime_class_init()
4874 * inside the inlined code
4876 if (!(cfg->opt & MONO_OPT_SHARED)) {
4877 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4878 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4879 vtable = mono_class_vtable (cfg->domain, method->klass);
4882 if (!cfg->compile_aot)
4883 mono_runtime_class_init (vtable);
4884 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4885 if (cfg->run_cctors && method->klass->has_cctor) {
4886 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
4887 if (!method->klass->runtime_info)
4888 /* No vtable created yet */
4890 vtable = mono_class_vtable (cfg->domain, method->klass);
4893 /* This makes so that inline cannot trigger */
4894 /* .cctors: too many apps depend on them */
4895 /* running with a specific order... */
4896 if (! vtable->initialized)
4898 mono_runtime_class_init (vtable);
4900 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4901 if (!method->klass->runtime_info)
4902 /* No vtable created yet */
4904 vtable = mono_class_vtable (cfg->domain, method->klass);
4907 if (!vtable->initialized)
4912 * If we're compiling for shared code
4913 * the cctor will need to be run at aot method load time, for example,
4914 * or at the end of the compilation of the inlining method.
4916 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4921 * CAS - do not inline methods with declarative security
4922 * Note: this has to be before any possible return TRUE;
4924 if (mono_security_method_has_declsec (method))
4927 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4928 if (mono_arch_is_soft_float ()) {
/* R4 parameters/returns need soft-float conversion; do not inline those. */
4930 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4932 for (i = 0; i < sig->param_count; ++i)
4933 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4938 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD to a field of KLASS
 * requires emitting a class-constructor trigger.  Not needed when the vtable
 * is already initialized (JIT only), for BeforeFieldInit classes accessed
 * from other methods, or when the access is from an instance method of the
 * class itself (the cctor already ran before the call).
 */
4945 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4947 if (!cfg->compile_aot) {
4949 if (vtable->initialized)
4953 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4954 if (cfg->method == method)
4958 if (!mono_class_needs_cctor_run (klass, method))
4961 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4962 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of one-dimensional array ARR of element
 * class KLASS: &arr->vector [index * element_size].  Optionally emits a
 * bounds check (BCHECK).  Uses an x86 LEA for power-of-two element sizes, and
 * an rgctx-provided element size under variable-size gsharedvt.
 * Returns a STACK_MP instruction tagged with the element class.
 */
4969 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4973 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4976 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4979 mono_class_init (klass);
4980 size = mono_class_array_element_size (klass);
4983 mult_reg = alloc_preg (cfg);
4984 array_reg = arr->dreg;
4985 index_reg = index->dreg;
4987 #if SIZEOF_REGISTER == 8
4988 /* The array reg is 64 bits but the index reg is only 32 */
4989 if (COMPILE_LLVM (cfg)) {
4991 index2_reg = index_reg;
4993 index2_reg = alloc_preg (cfg);
4994 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4997 if (index->type == STACK_I8) {
4998 index2_reg = alloc_preg (cfg);
4999 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5001 index2_reg = index_reg;
5006 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold the scale into a single LEA when size is 1/2/4/8. */
5008 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5009 if (size == 1 || size == 2 || size == 4 || size == 8) {
5010 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5012 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5013 ins->klass = mono_class_get_element_class (klass);
5014 ins->type = STACK_MP;
5020 add_reg = alloc_ireg_mp (cfg);
5023 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it via the rgctx. */
5026 g_assert (cfg->generic_sharing_context);
5027 context_used = mini_class_check_context_used (cfg, klass);
5028 g_assert (context_used);
5029 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5030 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5034 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5035 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5036 ins->klass = mono_class_get_element_class (klass);
5037 ins->type = STACK_MP;
5038 MONO_ADD_INS (cfg->cbb, ins);
5043 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [i,j] of a rank-2 array: subtract each
 * index's lower bound, range-check both against the per-dimension lengths
 * (throwing IndexOutOfRangeException), then compute
 * &vector [(realidx1 * length2 + realidx2) * element_size].
 * Guarded by !MONO_ARCH_EMULATE_MUL_DIV since it relies on native multiply.
 */
5045 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5047 int bounds_reg = alloc_preg (cfg);
5048 int add_reg = alloc_ireg_mp (cfg);
5049 int mult_reg = alloc_preg (cfg);
5050 int mult2_reg = alloc_preg (cfg);
5051 int low1_reg = alloc_preg (cfg);
5052 int low2_reg = alloc_preg (cfg);
5053 int high1_reg = alloc_preg (cfg);
5054 int high2_reg = alloc_preg (cfg);
5055 int realidx1_reg = alloc_preg (cfg);
5056 int realidx2_reg = alloc_preg (cfg);
5057 int sum_reg = alloc_preg (cfg);
5058 int index1, index2, tmpreg;
5062 mono_class_init (klass);
5063 size = mono_class_array_element_size (klass);
5065 index1 = index_ins1->dreg;
5066 index2 = index_ins2->dreg;
5068 #if SIZEOF_REGISTER == 8
5069 /* The array reg is 64 bits but the index reg is only 32 */
5070 if (COMPILE_LLVM (cfg)) {
5073 tmpreg = alloc_preg (cfg);
5074 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5076 tmpreg = alloc_preg (cfg);
5077 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5081 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5085 /* range checking */
5086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5087 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5089 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5090 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5091 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5092 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5093 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
/* Unsigned compare: a negative realidx wraps and also fails the check. */
5094 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5095 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5097 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5098 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5099 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5100 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5101 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5102 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5103 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5105 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5106 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5108 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5109 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5111 ins->type = STACK_MP;
5113 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element address for an Array Address/Get/Set helper CMETHOD.
 * Rank 1 and (when intrinsics are enabled and native mul is available) rank 2
 * are inlined; higher ranks go through a marshalled array-address helper call.
 * IS_SET drops the trailing value argument from the rank computation.
 */
5120 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5124 MonoMethod *addr_method;
5127 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5130 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5132 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5133 /* emit_ldelema_2 depends on OP_LMUL */
5134 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5135 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
5139 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5140 addr_method = mono_marshal_get_array_address (rank, element_size);
5141 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint. */
5146 static MonoBreakPolicy
5147 always_insert_breakpoint (MonoMethod *method)
5149 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5152 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5155 * mono_set_break_policy:
5156 * policy_callback: the new callback function
5158 * Allow embedders to decide whether to actually obey breakpoint instructions
5159 * (both break IL instructions and Debugger.Break () method calls), for example
5160 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5161 * untrusted or semi-trusted code.
5163 * @policy_callback will be called every time a break point instruction needs to
5164 * be inserted with the method argument being the method that calls Debugger.Break()
5165 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5166 * if it wants the breakpoint to not be effective in the given method.
5167 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * (always insert breakpoints). */
5170 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5172 if (policy_callback)
5173 break_policy_func = policy_callback;
5175 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should be
 * emitted.  ON_DBG is no longer supported and unknown values are warned about.
 * NOTE(review): the identifier misspells "breakpoint"; renaming would touch
 * every caller, so it is kept as-is.
 */
5179 should_insert_brekpoint (MonoMethod *method) {
5180 switch (break_policy_func (method)) {
5181 case MONO_BREAK_POLICY_ALWAYS:
5183 case MONO_BREAK_POLICY_NEVER:
5185 case MONO_BREAK_POLICY_ON_DBG:
5186 g_warning ("mdb no longer supported");
5189 g_warning ("Incorrect value returned from break policy callback");
5194 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args: [0] array, [1] index, [2] value address.  Computes the element
 * address (bounds already checked by the caller) and copies the value either
 * into the array (IS_SET, with a write barrier for reference elements) or out
 * of it.
 */
5196 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5198 MonoInst *addr, *store, *load;
5199 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5201 /* the bounds check is already done by the callers */
5202 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5204 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5205 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5206 if (mini_type_is_reference (cfg, fsig->params [2]))
5207 emit_write_barrier (cfg, addr, load);
5209 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5210 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS's byval type is a reference type (see mini_type_is_reference). */
5217 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5219 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit sp[0][sp[1]] = sp[2] for element class KLASS.  Reference-typed,
 * non-NULL stores with SAFETY_CHECKS go through the virtual stelemref helper
 * (which performs the array covariance check); otherwise the address is
 * computed inline, with a constant-index fast path and a write barrier for
 * reference elements.
 */
5223 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5225 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5226 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5227 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5228 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5229 MonoInst *iargs [3];
5232 mono_class_setup_vtable (obj_array);
5233 g_assert (helper->slot);
5235 if (sp [0]->type != STACK_OBJ)
5237 if (sp [2]->type != STACK_OBJ)
5244 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5248 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5251 // FIXME-VT: OP_ICONST optimization
5252 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5253 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5254 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset and keep only the bounds check. */
5255 } else if (sp [1]->opcode == OP_ICONST) {
5256 int array_reg = sp [0]->dreg;
5257 int index_reg = sp [1]->dreg;
5258 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5261 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5264 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5265 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5266 if (generic_class_is_reference_type (cfg, klass))
5267 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: store goes through emit_array_store
 * without safety checks; load computes the element address (no bounds check)
 * and loads the element.  The element class comes from the value parameter
 * (store) or the return type (load).
 */
5274 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5279 eklass = mono_class_from_mono_type (fsig->params [2]);
5281 eklass = mono_class_from_mono_type (fsig->ret);
5284 return emit_array_store (cfg, eklass, args, FALSE);
5286 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5287 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Return whether values of PARAM_KLASS can be reinterpreted as RETURN_KLASS
 * for the Array.UnsafeMov intrinsic: both must be valuetypes, without managed
 * references, both structs or both primitives/enums, not floating point, and
 * of identical value size.
 * Fix: the two `&param_klass` takes-address expressions had been corrupted
 * into the mojibake `¶m_klass` (`&para` + `m_klass`); restored to match
 * the symmetric `&return_klass` expressions on the same lines.
 */
5293 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5297 //Only allow for valuetypes
5298 if (!param_klass->valuetype || !return_klass->valuetype)
5302 if (param_klass->has_references || return_klass->has_references)
5305 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5306 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5307 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5310 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5311 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5314 //And have the same size
5315 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov when the parameter and return types (or their
 * element classes, for rank-1 arrays) are bit-compatible valuetypes per
 * is_unsafe_mov_compatible; otherwise falls through (not intrinsified).
 */
5321 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5323 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5324 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5326 //Valuetypes that are semantically equivalent
5327 if (is_unsafe_mov_compatible (param_klass, return_klass))
5330 //Arrays of valuetypes that are semantically equivalent
5331 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to intrinsify a constructor call: SIMD intrinsics first (when
 * MONO_OPT_SIMD is on), then native-types intrinsics.  Returns NULL when the
 * ctor is not recognized.
 */
5338 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5340 #ifdef MONO_ARCH_SIMD_INTRINSICS
5341 MonoInst *ins = NULL;
5343 if (cfg->opt & MONO_OPT_SIMD) {
5344 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5350 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given MONO_MEMORY_BARRIER_* KIND into the
 * current basic block. */
5354 emit_memory_barrier (MonoCompile *cfg, int kind)
5356 MonoInst *ins = NULL;
5357 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5358 MONO_ADD_INS (cfg->cbb, ins);
5359 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics only the LLVM backend can lower: Math.Sin/Cos/Sqrt/Abs(double)
 * as single R8 opcodes, and (under MONO_OPT_CMOV) Math.Min/Max for
 * I4/U4/I8/U8 as IMIN/IMAX/LMIN/LMAX variants.  Returns NULL when not
 * recognized.
 */
5365 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5367 MonoInst *ins = NULL;
5370 /* The LLVM backend supports these intrinsics */
5371 if (cmethod->klass == mono_defaults.math_class) {
5372 if (strcmp (cmethod->name, "Sin") == 0) {
5374 } else if (strcmp (cmethod->name, "Cos") == 0) {
5376 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5378 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5382 if (opcode && fsig->param_count == 1) {
5383 MONO_INST_NEW (cfg, ins, opcode);
5384 ins->type = STACK_R8;
5385 ins->dreg = mono_alloc_freg (cfg);
5386 ins->sreg1 = args [0]->dreg;
5387 MONO_ADD_INS (cfg->cbb, ins);
5391 if (cfg->opt & MONO_OPT_CMOV) {
5392 if (strcmp (cmethod->name, "Min") == 0) {
5393 if (fsig->params [0]->type == MONO_TYPE_I4)
5395 if (fsig->params [0]->type == MONO_TYPE_U4)
5396 opcode = OP_IMIN_UN;
5397 else if (fsig->params [0]->type == MONO_TYPE_I8)
5399 else if (fsig->params [0]->type == MONO_TYPE_U8)
5400 opcode = OP_LMIN_UN;
5401 } else if (strcmp (cmethod->name, "Max") == 0) {
5402 if (fsig->params [0]->type == MONO_TYPE_I4)
5404 if (fsig->params [0]->type == MONO_TYPE_U4)
5405 opcode = OP_IMAX_UN;
5406 else if (fsig->params [0]->type == MONO_TYPE_I8)
5408 else if (fsig->params [0]->type == MONO_TYPE_U8)
5409 opcode = OP_LMAX_UN;
5413 if (opcode && fsig->param_count == 2) {
5414 MONO_INST_NEW (cfg, ins, opcode);
/* NOTE(review): U4 operands fall into the STACK_I8 arm of this ternary even
 * though OP_IMIN_UN/OP_IMAX_UN are 32-bit ops — looks suspicious, confirm
 * against type_from_op/backend expectations before relying on it. */
5415 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5416 ins->dreg = mono_alloc_ireg (cfg);
5417 ins->sreg1 = args [0]->dreg;
5418 ins->sreg2 = args [1]->dreg;
5419 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are valid even under generic sharing: the Array
 * UnsafeStore/UnsafeLoad/UnsafeMov internal helpers.  Returns NULL otherwise.
 */
5427 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5429 if (cmethod->klass == mono_defaults.array_class) {
5430 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5431 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5432 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5433 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5434 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5435 return emit_array_unsafe_mov (cfg, fsig, args);
5442 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5444 MonoInst *ins = NULL;
5446 static MonoClass *runtime_helpers_class = NULL;
5447 if (! runtime_helpers_class)
5448 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5449 "System.Runtime.CompilerServices", "RuntimeHelpers");
5451 if (cmethod->klass == mono_defaults.string_class) {
5452 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5453 int dreg = alloc_ireg (cfg);
5454 int index_reg = alloc_preg (cfg);
5455 int mult_reg = alloc_preg (cfg);
5456 int add_reg = alloc_preg (cfg);
5458 #if SIZEOF_REGISTER == 8
5459 /* The array reg is 64 bits but the index reg is only 32 */
5460 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5462 index_reg = args [1]->dreg;
5464 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5466 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5467 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5468 add_reg = ins->dreg;
5469 /* Avoid a warning */
5471 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5475 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5476 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5477 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5479 type_from_op (ins, NULL, NULL);
5481 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5482 int dreg = alloc_ireg (cfg);
5483 /* Decompose later to allow more optimizations */
5484 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5485 ins->type = STACK_I4;
5486 ins->flags |= MONO_INST_FAULT;
5487 cfg->cbb->has_array_access = TRUE;
5488 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5491 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5492 int mult_reg = alloc_preg (cfg);
5493 int add_reg = alloc_preg (cfg);
5495 /* The corlib functions check for oob already. */
5496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5497 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5498 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5499 return cfg->cbb->last_ins;
5502 } else if (cmethod->klass == mono_defaults.object_class) {
5504 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5505 int dreg = alloc_ireg_ref (cfg);
5506 int vt_reg = alloc_preg (cfg);
5507 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5508 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5509 type_from_op (ins, NULL, NULL);
5512 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5513 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5514 int dreg = alloc_ireg (cfg);
5515 int t1 = alloc_ireg (cfg);
5517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5518 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5519 ins->type = STACK_I4;
5523 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5524 MONO_INST_NEW (cfg, ins, OP_NOP);
5525 MONO_ADD_INS (cfg->cbb, ins);
5529 } else if (cmethod->klass == mono_defaults.array_class) {
5530 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5531 return emit_array_generic_access (cfg, fsig, args, FALSE);
5532 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5533 return emit_array_generic_access (cfg, fsig, args, TRUE);
5535 #ifndef MONO_BIG_ARRAYS
5537 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5540 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5541 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5542 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5543 int dreg = alloc_ireg (cfg);
5544 int bounds_reg = alloc_ireg_mp (cfg);
5545 MonoBasicBlock *end_bb, *szarray_bb;
5546 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5548 NEW_BBLOCK (cfg, end_bb);
5549 NEW_BBLOCK (cfg, szarray_bb);
5551 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5552 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5555 /* Non-szarray case */
5557 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5558 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5560 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5561 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5563 MONO_START_BB (cfg, szarray_bb);
5566 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5567 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5569 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5570 MONO_START_BB (cfg, end_bb);
5572 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5573 ins->type = STACK_I4;
5579 if (cmethod->name [0] != 'g')
5582 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5583 int dreg = alloc_ireg (cfg);
5584 int vtable_reg = alloc_preg (cfg);
5585 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5586 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5587 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5588 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5589 type_from_op (ins, NULL, NULL);
5592 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5593 int dreg = alloc_ireg (cfg);
5595 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5596 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5597 type_from_op (ins, NULL, NULL);
5602 } else if (cmethod->klass == runtime_helpers_class) {
5604 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5605 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5609 } else if (cmethod->klass == mono_defaults.thread_class) {
5610 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5611 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5612 MONO_ADD_INS (cfg->cbb, ins);
5614 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5615 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5616 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5618 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5620 if (fsig->params [0]->type == MONO_TYPE_I1)
5621 opcode = OP_LOADI1_MEMBASE;
5622 else if (fsig->params [0]->type == MONO_TYPE_U1)
5623 opcode = OP_LOADU1_MEMBASE;
5624 else if (fsig->params [0]->type == MONO_TYPE_I2)
5625 opcode = OP_LOADI2_MEMBASE;
5626 else if (fsig->params [0]->type == MONO_TYPE_U2)
5627 opcode = OP_LOADU2_MEMBASE;
5628 else if (fsig->params [0]->type == MONO_TYPE_I4)
5629 opcode = OP_LOADI4_MEMBASE;
5630 else if (fsig->params [0]->type == MONO_TYPE_U4)
5631 opcode = OP_LOADU4_MEMBASE;
5632 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5633 opcode = OP_LOADI8_MEMBASE;
5634 else if (fsig->params [0]->type == MONO_TYPE_R4)
5635 opcode = OP_LOADR4_MEMBASE;
5636 else if (fsig->params [0]->type == MONO_TYPE_R8)
5637 opcode = OP_LOADR8_MEMBASE;
5638 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5639 opcode = OP_LOAD_MEMBASE;
5642 MONO_INST_NEW (cfg, ins, opcode);
5643 ins->inst_basereg = args [0]->dreg;
5644 ins->inst_offset = 0;
5645 MONO_ADD_INS (cfg->cbb, ins);
5647 switch (fsig->params [0]->type) {
5654 ins->dreg = mono_alloc_ireg (cfg);
5658 ins->dreg = mono_alloc_lreg (cfg);
5662 ins->dreg = mono_alloc_ireg (cfg);
5666 ins->dreg = mono_alloc_freg (cfg);
5669 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5670 ins->dreg = mono_alloc_ireg_ref (cfg);
5674 if (opcode == OP_LOADI8_MEMBASE)
5675 ins = mono_decompose_opcode (cfg, ins);
5677 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
5681 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5683 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5685 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5686 opcode = OP_STOREI1_MEMBASE_REG;
5687 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5688 opcode = OP_STOREI2_MEMBASE_REG;
5689 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5690 opcode = OP_STOREI4_MEMBASE_REG;
5691 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5692 opcode = OP_STOREI8_MEMBASE_REG;
5693 else if (fsig->params [0]->type == MONO_TYPE_R4)
5694 opcode = OP_STORER4_MEMBASE_REG;
5695 else if (fsig->params [0]->type == MONO_TYPE_R8)
5696 opcode = OP_STORER8_MEMBASE_REG;
5697 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5698 opcode = OP_STORE_MEMBASE_REG;
5701 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5703 MONO_INST_NEW (cfg, ins, opcode);
5704 ins->sreg1 = args [1]->dreg;
5705 ins->inst_destbasereg = args [0]->dreg;
5706 ins->inst_offset = 0;
5707 MONO_ADD_INS (cfg->cbb, ins);
5709 if (opcode == OP_STOREI8_MEMBASE_REG)
5710 ins = mono_decompose_opcode (cfg, ins);
5715 } else if (cmethod->klass == mono_defaults.monitor_class) {
5716 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5717 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5720 if (COMPILE_LLVM (cfg)) {
5722 * Pass the argument normally, the LLVM backend will handle the
5723 * calling convention problems.
5725 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5727 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5728 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5729 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5730 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5733 return (MonoInst*)call;
5734 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
5735 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5738 if (COMPILE_LLVM (cfg)) {
5740 * Pass the argument normally, the LLVM backend will handle the
5741 * calling convention problems.
5743 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5745 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5746 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5747 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5748 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5751 return (MonoInst*)call;
5753 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5756 if (COMPILE_LLVM (cfg)) {
5757 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5759 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5760 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5761 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5762 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5765 return (MonoInst*)call;
5768 } else if (cmethod->klass->image == mono_defaults.corlib &&
5769 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5770 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5773 #if SIZEOF_REGISTER == 8
5774 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5775 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5776 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5777 ins->dreg = mono_alloc_preg (cfg);
5778 ins->sreg1 = args [0]->dreg;
5779 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5780 MONO_ADD_INS (cfg->cbb, ins);
5784 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5786 /* 64 bit reads are already atomic */
5787 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5788 load_ins->dreg = mono_alloc_preg (cfg);
5789 load_ins->inst_basereg = args [0]->dreg;
5790 load_ins->inst_offset = 0;
5791 MONO_ADD_INS (cfg->cbb, load_ins);
5793 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5800 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5801 MonoInst *ins_iconst;
5804 if (fsig->params [0]->type == MONO_TYPE_I4) {
5805 opcode = OP_ATOMIC_ADD_I4;
5806 cfg->has_atomic_add_i4 = TRUE;
5808 #if SIZEOF_REGISTER == 8
5809 else if (fsig->params [0]->type == MONO_TYPE_I8)
5810 opcode = OP_ATOMIC_ADD_I8;
5813 if (!mono_arch_opcode_supported (opcode))
5815 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5816 ins_iconst->inst_c0 = 1;
5817 ins_iconst->dreg = mono_alloc_ireg (cfg);
5818 MONO_ADD_INS (cfg->cbb, ins_iconst);
5820 MONO_INST_NEW (cfg, ins, opcode);
5821 ins->dreg = mono_alloc_ireg (cfg);
5822 ins->inst_basereg = args [0]->dreg;
5823 ins->inst_offset = 0;
5824 ins->sreg2 = ins_iconst->dreg;
5825 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5826 MONO_ADD_INS (cfg->cbb, ins);
5828 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5829 MonoInst *ins_iconst;
5832 if (fsig->params [0]->type == MONO_TYPE_I4) {
5833 opcode = OP_ATOMIC_ADD_I4;
5834 cfg->has_atomic_add_i4 = TRUE;
5836 #if SIZEOF_REGISTER == 8
5837 else if (fsig->params [0]->type == MONO_TYPE_I8)
5838 opcode = OP_ATOMIC_ADD_I8;
5841 if (!mono_arch_opcode_supported (opcode))
5843 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5844 ins_iconst->inst_c0 = -1;
5845 ins_iconst->dreg = mono_alloc_ireg (cfg);
5846 MONO_ADD_INS (cfg->cbb, ins_iconst);
5848 MONO_INST_NEW (cfg, ins, opcode);
5849 ins->dreg = mono_alloc_ireg (cfg);
5850 ins->inst_basereg = args [0]->dreg;
5851 ins->inst_offset = 0;
5852 ins->sreg2 = ins_iconst->dreg;
5853 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5854 MONO_ADD_INS (cfg->cbb, ins);
5856 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5859 if (fsig->params [0]->type == MONO_TYPE_I4) {
5860 opcode = OP_ATOMIC_ADD_I4;
5861 cfg->has_atomic_add_i4 = TRUE;
5863 #if SIZEOF_REGISTER == 8
5864 else if (fsig->params [0]->type == MONO_TYPE_I8)
5865 opcode = OP_ATOMIC_ADD_I8;
5868 if (!mono_arch_opcode_supported (opcode))
5870 MONO_INST_NEW (cfg, ins, opcode);
5871 ins->dreg = mono_alloc_ireg (cfg);
5872 ins->inst_basereg = args [0]->dreg;
5873 ins->inst_offset = 0;
5874 ins->sreg2 = args [1]->dreg;
5875 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5876 MONO_ADD_INS (cfg->cbb, ins);
5879 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5881 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5883 if (fsig->params [0]->type == MONO_TYPE_I4) {
5884 opcode = OP_ATOMIC_EXCHANGE_I4;
5885 cfg->has_atomic_exchange_i4 = TRUE;
5887 #if SIZEOF_REGISTER == 8
5888 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5889 (fsig->params [0]->type == MONO_TYPE_I))
5890 opcode = OP_ATOMIC_EXCHANGE_I8;
5892 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5893 opcode = OP_ATOMIC_EXCHANGE_I4;
5894 cfg->has_atomic_exchange_i4 = TRUE;
5900 if (!mono_arch_opcode_supported (opcode))
5903 MONO_INST_NEW (cfg, ins, opcode);
5904 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5905 ins->inst_basereg = args [0]->dreg;
5906 ins->inst_offset = 0;
5907 ins->sreg2 = args [1]->dreg;
5908 MONO_ADD_INS (cfg->cbb, ins);
5910 switch (fsig->params [0]->type) {
5912 ins->type = STACK_I4;
5915 ins->type = STACK_I8;
5918 #if SIZEOF_REGISTER == 8
5919 ins->type = STACK_I8;
5921 ins->type = STACK_I4;
5925 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5926 ins->type = STACK_OBJ;
5930 if (cfg->gen_write_barriers && is_ref)
5931 emit_write_barrier (cfg, args [0], args [1]);
5933 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5935 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5937 if (fsig->params [1]->type == MONO_TYPE_I4)
5939 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5940 size = sizeof (gpointer);
5941 #if SIZEOF_REGISTER == 8
5942 else if (fsig->params [1]->type == MONO_TYPE_I8)
5947 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5949 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5950 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5951 ins->sreg1 = args [0]->dreg;
5952 ins->sreg2 = args [1]->dreg;
5953 ins->sreg3 = args [2]->dreg;
5954 MONO_ADD_INS (cfg->cbb, ins);
5955 cfg->has_atomic_cas_i4 = TRUE;
5956 } else if (size == 8) {
5957 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5959 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5960 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5961 ins->sreg1 = args [0]->dreg;
5962 ins->sreg2 = args [1]->dreg;
5963 ins->sreg3 = args [2]->dreg;
5964 MONO_ADD_INS (cfg->cbb, ins);
5966 /* g_assert_not_reached (); */
5970 switch (fsig->params [0]->type) {
5972 ins->type = STACK_I4;
5975 ins->type = STACK_I8;
5978 #if SIZEOF_REGISTER == 8
5979 ins->type = STACK_I8;
5981 ins->type = STACK_I4;
5985 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5986 ins->type = STACK_OBJ;
5991 if (cfg->gen_write_barriers && is_ref)
5992 emit_write_barrier (cfg, args [0], args [1]);
5994 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5995 fsig->params [1]->type == MONO_TYPE_I4) {
5996 MonoInst *cmp, *ceq;
5998 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6001 /* int32 r = CAS (location, value, comparand); */
6002 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6003 ins->dreg = alloc_ireg (cfg);
6004 ins->sreg1 = args [0]->dreg;
6005 ins->sreg2 = args [1]->dreg;
6006 ins->sreg3 = args [2]->dreg;
6007 ins->type = STACK_I4;
6008 MONO_ADD_INS (cfg->cbb, ins);
6010 /* bool result = r == comparand; */
6011 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6012 cmp->sreg1 = ins->dreg;
6013 cmp->sreg2 = args [2]->dreg;
6014 cmp->type = STACK_I4;
6015 MONO_ADD_INS (cfg->cbb, cmp);
6017 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6018 ceq->dreg = alloc_ireg (cfg);
6019 ceq->type = STACK_I4;
6020 MONO_ADD_INS (cfg->cbb, ceq);
6022 /* *success = result; */
6023 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6025 cfg->has_atomic_cas_i4 = TRUE;
6027 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6028 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6032 } else if (cmethod->klass->image == mono_defaults.corlib &&
6033 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6034 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6037 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6039 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6040 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6042 if (fsig->params [0]->type == MONO_TYPE_I1)
6043 opcode = OP_ATOMIC_LOAD_I1;
6044 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6045 opcode = OP_ATOMIC_LOAD_U1;
6046 else if (fsig->params [0]->type == MONO_TYPE_I2)
6047 opcode = OP_ATOMIC_LOAD_I2;
6048 else if (fsig->params [0]->type == MONO_TYPE_U2)
6049 opcode = OP_ATOMIC_LOAD_U2;
6050 else if (fsig->params [0]->type == MONO_TYPE_I4)
6051 opcode = OP_ATOMIC_LOAD_I4;
6052 else if (fsig->params [0]->type == MONO_TYPE_U4)
6053 opcode = OP_ATOMIC_LOAD_U4;
6054 else if (fsig->params [0]->type == MONO_TYPE_R4)
6055 opcode = OP_ATOMIC_LOAD_R4;
6056 else if (fsig->params [0]->type == MONO_TYPE_R8)
6057 opcode = OP_ATOMIC_LOAD_R8;
6058 #if SIZEOF_REGISTER == 8
6059 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6060 opcode = OP_ATOMIC_LOAD_I8;
6061 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6062 opcode = OP_ATOMIC_LOAD_U8;
6064 else if (fsig->params [0]->type == MONO_TYPE_I)
6065 opcode = OP_ATOMIC_LOAD_I4;
6066 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6067 opcode = OP_ATOMIC_LOAD_U4;
6071 if (!mono_arch_opcode_supported (opcode))
6074 MONO_INST_NEW (cfg, ins, opcode);
6075 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6076 ins->sreg1 = args [0]->dreg;
6077 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6078 MONO_ADD_INS (cfg->cbb, ins);
6082 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6084 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6086 if (fsig->params [0]->type == MONO_TYPE_I1)
6087 opcode = OP_ATOMIC_STORE_I1;
6088 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6089 opcode = OP_ATOMIC_STORE_U1;
6090 else if (fsig->params [0]->type == MONO_TYPE_I2)
6091 opcode = OP_ATOMIC_STORE_I2;
6092 else if (fsig->params [0]->type == MONO_TYPE_U2)
6093 opcode = OP_ATOMIC_STORE_U2;
6094 else if (fsig->params [0]->type == MONO_TYPE_I4)
6095 opcode = OP_ATOMIC_STORE_I4;
6096 else if (fsig->params [0]->type == MONO_TYPE_U4)
6097 opcode = OP_ATOMIC_STORE_U4;
6098 else if (fsig->params [0]->type == MONO_TYPE_R4)
6099 opcode = OP_ATOMIC_STORE_R4;
6100 else if (fsig->params [0]->type == MONO_TYPE_R8)
6101 opcode = OP_ATOMIC_STORE_R8;
6102 #if SIZEOF_REGISTER == 8
6103 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6104 opcode = OP_ATOMIC_STORE_I8;
6105 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6106 opcode = OP_ATOMIC_STORE_U8;
6108 else if (fsig->params [0]->type == MONO_TYPE_I)
6109 opcode = OP_ATOMIC_STORE_I4;
6110 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6111 opcode = OP_ATOMIC_STORE_U4;
6115 if (!mono_arch_opcode_supported (opcode))
6118 MONO_INST_NEW (cfg, ins, opcode);
6119 ins->dreg = args [0]->dreg;
6120 ins->sreg1 = args [1]->dreg;
6121 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6122 MONO_ADD_INS (cfg->cbb, ins);
6124 if (cfg->gen_write_barriers && is_ref)
6125 emit_write_barrier (cfg, args [0], args [1]);
6131 } else if (cmethod->klass->image == mono_defaults.corlib &&
6132 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6133 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6134 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6135 if (should_insert_brekpoint (cfg->method)) {
6136 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6138 MONO_INST_NEW (cfg, ins, OP_NOP);
6139 MONO_ADD_INS (cfg->cbb, ins);
6143 } else if (cmethod->klass->image == mono_defaults.corlib &&
6144 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6145 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6146 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6148 EMIT_NEW_ICONST (cfg, ins, 1);
6150 EMIT_NEW_ICONST (cfg, ins, 0);
6153 } else if (cmethod->klass == mono_defaults.math_class) {
6155 * There is general branchless code for Min/Max, but it does not work for
6157 * http://everything2.com/?node_id=1051618
6159 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6160 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6161 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6162 !strcmp (cmethod->klass->name, "Selector")) {
6163 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6164 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6165 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6168 MonoJumpInfoToken *ji;
6171 cfg->disable_llvm = TRUE;
6173 if (args [0]->opcode == OP_GOT_ENTRY) {
6174 pi = args [0]->inst_p1;
6175 g_assert (pi->opcode == OP_PATCH_INFO);
6176 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6179 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6180 ji = args [0]->inst_p0;
6183 NULLIFY_INS (args [0]);
6186 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6187 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6188 ins->dreg = mono_alloc_ireg (cfg);
6190 ins->inst_p0 = mono_string_to_utf8 (s);
6191 MONO_ADD_INS (cfg->cbb, ins);
6197 #ifdef MONO_ARCH_SIMD_INTRINSICS
6198 if (cfg->opt & MONO_OPT_SIMD) {
6199 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6205 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6209 if (COMPILE_LLVM (cfg)) {
6210 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6215 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6219 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected runtime-internal methods to faster managed
 * equivalents, returning the emitted call instruction, or NULL (in elided
 * code paths) when no redirection applies. Currently handles the managed
 * allocator for String.InternalAllocateStr when allocation profiling and
 * shared (AOT-style) code are both disabled.
 */
6222 inline static MonoInst*
6223 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6224 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6226 if (method->klass == mono_defaults.string_class) {
6227 /* managed string allocation support */
6228 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6229 MonoInst *iargs [2];
6230 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6231 MonoMethod *managed_alloc = NULL;
6233 g_assert (vtable); /* Should not fail since it is System.String */
6234 #ifndef MONO_CROSS_COMPILE
6235 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the GC's managed allocator with (vtable, length) instead of the icall. */
6239 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6240 iargs [1] = args [0];
6241 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Spill the arguments of an about-to-be-inlined call (on the evaluation
 * stack SP) into newly created local variables, installing them as
 * cfg->args [] so the inlined body sees them as its arguments. The 'this'
 * argument, when present, gets its type from the stack slot itself.
 */
6248 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6250 MonoInst *store, *temp;
6253 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6254 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6257 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6258 * would be different than the MonoInst's used to represent arguments, and
6259 * the ldelema implementation can't deal with that.
6260 * Solution: When ldelema is used on an inline argument, create a var for
6261 * it, emit ldelema on that var, and emit the saving code below in
6262 * inline_method () if needed.
6264 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6265 cfg->args [i] = temp;
6266 /* This uses cfg->args [i] which is set by the preceding line */
6267 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6268 store->cil_code = sp [0]->cil_code;
6273 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6274 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6276 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: return TRUE if CALLED_METHOD may be inlined according to
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable, which, when
 * set, restricts inlining to methods whose full name starts with that prefix.
 * The env var is read once and cached in a function-local static.
 */
6278 check_inline_called_method_name_limit (MonoMethod *called_method)
6281 static const char *limit = NULL;
6283 if (limit == NULL) {
6284 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6286 if (limit_string != NULL)
6287 limit = limit_string;
6292 if (limit [0] != '\0') {
6293 char *called_method_name = mono_method_full_name (called_method, TRUE);
6295 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6296 g_free (called_method_name);
/* Only an exact prefix match allows inlining; the <= variant is kept for experiments. */
6298 //return (strncmp_result <= 0);
6299 return (strncmp_result == 0);
6306 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper, mirror of check_inline_called_method_name_limit (): return
 * TRUE if CALLER_METHOD may inline callees, based on the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable prefix.
 * The env var is read once and cached in a function-local static.
 */
6308 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6311 static const char *limit = NULL;
6313 if (limit == NULL) {
6314 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6315 if (limit_string != NULL) {
6316 limit = limit_string;
6322 if (limit [0] != '\0') {
6323 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6325 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6326 g_free (caller_method_name);
/* Only an exact prefix match allows inlining; the <= variant is kept for experiments. */
6328 //return (strncmp_result <= 0);
6329 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which zero-initializes the vreg DREG according to RTYPE:
 * NULL for pointers/references, 0 for integral types, 0.0 (via a shared
 * static R8 constant) for floats, and VZERO for value types, including
 * generic instances and type variables known to be valuetypes.
 */
6337 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Shared storage for the 0.0 constant; OP_R8CONST references it by address. */
6339 static double r8_0 = 0.0;
6343 rtype = mini_replace_type (rtype);
6347 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6348 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6349 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6350 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6351 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6352 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6353 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6354 ins->type = STACK_R8;
6355 ins->inst_p0 = (void*)&r8_0;
6357 MONO_ADD_INS (cfg->cbb, ins);
6358 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6359 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6360 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6361 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6362 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a reference/pointer and set to NULL. */
6364 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initialization for DREG according to RTYPE. The dummy
 * opcodes keep the IR valid (every vreg has a def) without generating any
 * real initialization code; type dispatch mirrors emit_init_rvar (). Falls
 * back to a real emit_init_rvar () for types with no dummy opcode.
 */
6369 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6373 rtype = mini_replace_type (rtype);
6377 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6378 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6379 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6380 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6381 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6382 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6383 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6384 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6385 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6386 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6387 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6388 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6390 emit_init_rvar (cfg, dreg, rtype);
6394 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize the IL local variable LOCAL of type TYPE. If INIT is FALSE,
 * emit dummy initialization statements instead, just to keep the IR valid.
 * Under soft-float, the value is materialized in a fresh vreg and then
 * stored to the local so the soft-float decomposition sees a proper store.
 */
6396 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6398 MonoInst *var = cfg->locals [local];
6399 if (COMPILE_SOFT_FLOAT (cfg)) {
6401 int reg = alloc_dreg (cfg, var->type);
6402 emit_init_rvar (cfg, reg, type);
6403 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6406 emit_init_rvar (cfg, var->dreg, type);
6408 emit_dummy_init_rvar (cfg, var->dreg, type);
6415 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD (with signature FSIG and arguments SP) into the
 * current method at IL offset IP/REAL_OFFSET by recursively invoking
 * mono_method_to_ir () between freshly allocated start/end bblocks.
 * INLINE_ALWAYS forces the attempt regardless of the name-limit debug
 * filters and the cost threshold. On success, merges the generated bblocks
 * into the caller's CFG, pushes the return value (if any) on the stack,
 * stores the resulting current bblock in *OUT_CBB, and returns the positive
 * inline cost. On failure, restores the caller's state and returns a
 * non-positive value (exact failure values are in elided lines).
 */
6418 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6419 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6421 MonoInst *ins, *rvar = NULL;
6422 MonoMethodHeader *cheader;
6423 MonoBasicBlock *ebblock, *sbblock;
6425 MonoMethod *prev_inlined_method;
/* prev_* locals snapshot the caller's per-method compilation state so it
 * can be restored after the recursive mono_method_to_ir () call. */
6426 MonoInst **prev_locals, **prev_args;
6427 MonoType **prev_arg_types;
6428 guint prev_real_offset;
6429 GHashTable *prev_cbb_hash;
6430 MonoBasicBlock **prev_cil_offset_to_bb;
6431 MonoBasicBlock *prev_cbb;
6432 unsigned char* prev_cil_start;
6433 guint32 prev_cil_offset_to_bb_len;
6434 MonoMethod *prev_current_method;
6435 MonoGenericContext *prev_generic_context;
6436 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6438 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters: restrict inlining by callee/caller name prefix. */
6440 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6441 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6444 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6445 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6449 if (cfg->verbose_level > 2)
6450 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6452 if (!cmethod->inline_info) {
6453 cfg->stat_inlineable_methods++;
6454 cmethod->inline_info = 1;
6457 /* allocate local variables */
6458 cheader = mono_method_get_header (cmethod);
6460 if (cheader == NULL || mono_loader_get_last_error ()) {
6461 MonoLoaderError *error = mono_loader_get_last_error ();
6464 mono_metadata_free_mh (cheader);
/* Only a forced inline propagates the loader error as a compile exception. */
6465 if (inline_always && error)
6466 mono_cfg_set_exception (cfg, error->exception_type);
6468 mono_loader_clear_error ();
6472 /*Must verify before creating locals as it can cause the JIT to assert.*/
6473 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6474 mono_metadata_free_mh (cheader);
6478 /* allocate space to store the return value */
6479 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6480 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6483 prev_locals = cfg->locals;
6484 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6485 for (i = 0; i < cheader->num_locals; ++i)
6486 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6488 /* allocate start and end blocks */
6489 /* This is needed so if the inline is aborted, we can clean up */
6490 NEW_BBLOCK (cfg, sbblock);
6491 sbblock->real_offset = real_offset;
6493 NEW_BBLOCK (cfg, ebblock);
6494 ebblock->block_num = cfg->num_bblocks++;
6495 ebblock->real_offset = real_offset;
/* Save the caller's state, then point cfg at the inlinee. */
6497 prev_args = cfg->args;
6498 prev_arg_types = cfg->arg_types;
6499 prev_inlined_method = cfg->inlined_method;
6500 cfg->inlined_method = cmethod;
6501 cfg->ret_var_set = FALSE;
6502 cfg->inline_depth ++;
6503 prev_real_offset = cfg->real_offset;
6504 prev_cbb_hash = cfg->cbb_hash;
6505 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6506 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6507 prev_cil_start = cfg->cil_start;
6508 prev_cbb = cfg->cbb;
6509 prev_current_method = cfg->current_method;
6510 prev_generic_context = cfg->generic_context;
6511 prev_ret_var_set = cfg->ret_var_set;
6512 prev_disable_inline = cfg->disable_inline;
/* A virtual call devirtualized to a non-static method still needs a
 * 'this' null check inside the inlinee. */
6514 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6517 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6519 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of whether inlining succeeded. */
6521 cfg->inlined_method = prev_inlined_method;
6522 cfg->real_offset = prev_real_offset;
6523 cfg->cbb_hash = prev_cbb_hash;
6524 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6525 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6526 cfg->cil_start = prev_cil_start;
6527 cfg->locals = prev_locals;
6528 cfg->args = prev_args;
6529 cfg->arg_types = prev_arg_types;
6530 cfg->current_method = prev_current_method;
6531 cfg->generic_context = prev_generic_context;
6532 cfg->ret_var_set = prev_ret_var_set;
6533 cfg->disable_inline = prev_disable_inline;
6534 cfg->inline_depth --;
/* Accept the inline when the body was cheap enough (cost < 60) or forced. */
6536 if ((costs >= 0 && costs < 60) || inline_always) {
6537 if (cfg->verbose_level > 2)
6538 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6540 cfg->stat_inlined_methods++;
6542 /* always add some code to avoid block split failures */
6543 MONO_INST_NEW (cfg, ins, OP_NOP);
6544 MONO_ADD_INS (prev_cbb, ins);
6546 prev_cbb->next_bb = sbblock;
6547 link_bblock (cfg, prev_cbb, sbblock);
6550 * Get rid of the begin and end bblocks if possible to aid local
6553 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6555 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6556 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6558 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6559 MonoBasicBlock *prev = ebblock->in_bb [0];
6560 mono_merge_basic_blocks (cfg, prev, ebblock);
6562 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6563 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6564 cfg->cbb = prev_cbb;
6568 * It's possible that the rvar is set in some prev bblock, but not in others.
6574 for (i = 0; i < ebblock->in_count; ++i) {
6575 bb = ebblock->in_bb [i];
6577 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
/* Predecessor ends in unreachable code: give rvar a dummy def there. */
6580 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6588 *out_cbb = cfg->cbb;
6592 * If the inlined method contains only a throw, then the ret var is not
6593 * set, so set it to a dummy value.
6596 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6598 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6601 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: reset the exception state and discard the new bblocks. */
6604 if (cfg->verbose_level > 2)
6605 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6606 cfg->exception_type = MONO_EXCEPTION_NONE;
6607 mono_loader_clear_error ();
6609 /* This gets rid of the newly added bblocks */
6610 cfg->cbb = prev_cbb;
6612 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6617 * Some of these comments may well be out-of-date.
6618 * Design decisions: we do a single pass over the IL code (and we do bblock
6619 * splitting/merging in the few cases when it's required: a back jump to an IL
6620 * address that was not already seen as bblock starting point).
6621 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6622 * Complex operations are decomposed in simpler ones right away. We need to let the
6623 * arch-specific code peek and poke inside this process somehow (except when the
6624 * optimizations can take advantage of the full semantic info of coarse opcodes).
6625 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6626 * MonoInst->opcode initially is the IL opcode or some simplification of that
6627 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6628 * opcode with value bigger than OP_LAST.
6629 * At this point the IR can be handed over to an interpreter, a dumb code generator
6630 * or to the optimizing code generator that will translate it to SSA form.
6632 * Profiling directed optimizations.
6633 * We may compile by default with few or no optimizations and instrument the code
6634 * or the user may indicate what methods to optimize the most either in a config file
6635 * or through repeated runs where the compiler applies offline the optimizations to
6636 * each method and then decides if it was worth it.
6639 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6640 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6641 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6642 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6643 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6644 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6645 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6646 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6648 /* offset from br.s -> br like opcodes */
6649 #define BIG_BRANCH_OFFSET 13
6652 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6654 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6656 return b == NULL || b == bb;
6660 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6662 unsigned char *ip = start;
6663 unsigned char *target;
6666 MonoBasicBlock *bblock;
6667 const MonoOpcode *opcode;
6670 cli_addr = ip - start;
6671 i = mono_opcode_value ((const guint8 **)&ip, end);
6674 opcode = &mono_opcodes [i];
6675 switch (opcode->argument) {
6676 case MonoInlineNone:
6679 case MonoInlineString:
6680 case MonoInlineType:
6681 case MonoInlineField:
6682 case MonoInlineMethod:
6685 case MonoShortInlineR:
6692 case MonoShortInlineVar:
6693 case MonoShortInlineI:
6696 case MonoShortInlineBrTarget:
6697 target = start + cli_addr + 2 + (signed char)ip [1];
6698 GET_BBLOCK (cfg, bblock, target);
6701 GET_BBLOCK (cfg, bblock, ip);
6703 case MonoInlineBrTarget:
6704 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6705 GET_BBLOCK (cfg, bblock, target);
6708 GET_BBLOCK (cfg, bblock, ip);
6710 case MonoInlineSwitch: {
6711 guint32 n = read32 (ip + 1);
6714 cli_addr += 5 + 4 * n;
6715 target = start + cli_addr;
6716 GET_BBLOCK (cfg, bblock, target);
6718 for (j = 0; j < n; ++j) {
6719 target = start + cli_addr + (gint32)read32 (ip);
6720 GET_BBLOCK (cfg, bblock, target);
6730 g_assert_not_reached ();
6733 if (i == CEE_THROW) {
6734 unsigned char *bb_start = ip - 1;
6736 /* Find the start of the bblock containing the throw */
6738 while ((bb_start >= start) && !bblock) {
6739 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6743 bblock->out_of_line = 1;
6753 static inline MonoMethod *
6754 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6758 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6759 method = mono_method_get_wrapper_data (m, token);
6762 method = mono_class_inflate_generic_method_checked (method, context, &error);
6763 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
6766 method = mono_get_method_full (m->klass->image, token, klass, context);
6772 static inline MonoMethod *
6773 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6775 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6777 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
6783 static inline MonoClass*
6784 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6789 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6790 klass = mono_method_get_wrapper_data (method, token);
6792 klass = mono_class_inflate_generic_class (klass, context);
6794 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6795 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6798 mono_class_init (klass);
6802 static inline MonoMethodSignature*
6803 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6805 MonoMethodSignature *fsig;
6807 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6810 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6812 fsig = mono_inflate_generic_signature (fsig, context, &error);
6814 g_assert (mono_error_ok (&error));
6817 fsig = mono_metadata_parse_signature (method->klass->image, token);
6823 * Returns TRUE if the JIT should abort inlining because "callee"
6824 * is influenced by security attributes.
6827 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6831 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6835 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6836 if (result == MONO_JIT_SECURITY_OK)
6839 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6840 /* Generate code to throw a SecurityException before the actual call/link */
6841 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6844 NEW_ICONST (cfg, args [0], 4);
6845 NEW_METHODCONST (cfg, args [1], caller);
6846 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6847 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6848 /* don't hide previous results */
6849 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6850 cfg->exception_data = result;
6858 throw_exception (void)
6860 static MonoMethod *method = NULL;
6863 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6864 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
6871 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6873 MonoMethod *thrower = throw_exception ();
6876 EMIT_NEW_PCONST (cfg, args [0], ex);
6877 mono_emit_method_call (cfg, thrower, args, NULL);
6881 * Return the original method is a wrapper is specified. We can only access
6882 * the custom attributes from the original method.
6885 get_original_method (MonoMethod *method)
6887 if (method->wrapper_type == MONO_WRAPPER_NONE)
6890 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6891 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6894 /* in other cases we need to find the original method */
6895 return mono_marshal_method_from_wrapper (method);
6899 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6900 MonoBasicBlock *bblock, unsigned char *ip)
6902 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6903 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6905 emit_throw_exception (cfg, ex);
6909 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6910 MonoBasicBlock *bblock, unsigned char *ip)
6912 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6913 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6915 emit_throw_exception (cfg, ex);
6919 * Check that the IL instructions at ip are the array initialization
6920 * sequence and return the pointer to the data and the size.
6923 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6926 * newarr[System.Int32]
6928 * ldtoken field valuetype ...
6929 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6931 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6933 guint32 token = read32 (ip + 7);
6934 guint32 field_token = read32 (ip + 2);
6935 guint32 field_index = field_token & 0xffffff;
6937 const char *data_ptr;
6939 MonoMethod *cmethod;
6940 MonoClass *dummy_class;
6941 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6945 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6949 *out_field_token = field_token;
6951 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6954 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6956 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6957 case MONO_TYPE_BOOLEAN:
6961 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6962 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6963 case MONO_TYPE_CHAR:
6980 if (size > mono_type_size (field->type, &dummy_align))
6983 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6984 if (!image_is_dynamic (method->klass->image)) {
6985 field_index = read32 (ip + 2) & 0xffffff;
6986 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6987 data_ptr = mono_image_rva_map (method->klass->image, rva);
6988 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6989 /* for aot code we do the lookup on load */
6990 if (aot && data_ptr)
6991 return GUINT_TO_POINTER (rva);
6993 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6995 data_ptr = mono_field_get_data (field);
7003 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7005 char *method_fname = mono_method_full_name (method, TRUE);
7007 MonoMethodHeader *header = mono_method_get_header (method);
7009 if (header->code_size == 0)
7010 method_code = g_strdup ("method body is empty.");
7012 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7013 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7014 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7015 g_free (method_fname);
7016 g_free (method_code);
7017 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7021 set_exception_object (MonoCompile *cfg, MonoException *exception)
7023 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7024 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7025 cfg->exception_ptr = exception;
7029 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7032 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7033 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7034 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7035 /* Optimize reg-reg moves away */
7037 * Can't optimize other opcodes, since sp[0] might point to
7038 * the last ins of a decomposed opcode.
7040 sp [0]->dreg = (cfg)->locals [n]->dreg;
7042 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7047 * ldloca inhibits many optimizations so try to get rid of it in common
7050 static inline unsigned char *
7051 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7061 local = read16 (ip + 2);
7065 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7066 /* From the INITOBJ case */
7067 token = read32 (ip + 2);
7068 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7069 CHECK_TYPELOAD (klass);
7070 type = mini_replace_type (&klass->byval_arg);
7071 emit_init_local (cfg, local, type, TRUE);
7079 is_exception_class (MonoClass *class)
7082 if (class == mono_defaults.exception_class)
7084 class = class->parent;
7090 * is_jit_optimizer_disabled:
7092 * Determine whenever M's assembly has a DebuggableAttribute with the
7093 * IsJITOptimizerDisabled flag set.
7096 is_jit_optimizer_disabled (MonoMethod *m)
7098 MonoAssembly *ass = m->klass->image->assembly;
7099 MonoCustomAttrInfo* attrs;
7100 static MonoClass *klass;
7102 gboolean val = FALSE;
7105 if (ass->jit_optimizer_disabled_inited)
7106 return ass->jit_optimizer_disabled;
7109 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7112 ass->jit_optimizer_disabled = FALSE;
7113 mono_memory_barrier ();
7114 ass->jit_optimizer_disabled_inited = TRUE;
7118 attrs = mono_custom_attrs_from_assembly (ass);
7120 for (i = 0; i < attrs->num_attrs; ++i) {
7121 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7124 MonoMethodSignature *sig;
7126 if (!attr->ctor || attr->ctor->klass != klass)
7128 /* Decode the attribute. See reflection.c */
7129 len = attr->data_size;
7130 p = (const char*)attr->data;
7131 g_assert (read16 (p) == 0x0001);
7134 // FIXME: Support named parameters
7135 sig = mono_method_signature (attr->ctor);
7136 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7138 /* Two boolean arguments */
7142 mono_custom_attrs_free (attrs);
7145 ass->jit_optimizer_disabled = val;
7146 mono_memory_barrier ();
7147 ass->jit_optimizer_disabled_inited = TRUE;
7153 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7155 gboolean supported_tail_call;
7158 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7159 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7161 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7164 for (i = 0; i < fsig->param_count; ++i) {
7165 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7166 /* These can point to the current method's stack */
7167 supported_tail_call = FALSE;
7169 if (fsig->hasthis && cmethod->klass->valuetype)
7170 /* this might point to the current method's stack */
7171 supported_tail_call = FALSE;
7172 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7173 supported_tail_call = FALSE;
7174 if (cfg->method->save_lmf)
7175 supported_tail_call = FALSE;
7176 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7177 supported_tail_call = FALSE;
7178 if (call_opcode != CEE_CALL)
7179 supported_tail_call = FALSE;
7181 /* Debugging support */
7183 if (supported_tail_call) {
7184 if (!mono_debug_count ())
7185 supported_tail_call = FALSE;
7189 return supported_tail_call;
7192 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7193 * it to the thread local value based on the tls_offset field. Every other kind of access to
7194 * the field causes an assert.
7197 is_magic_tls_access (MonoClassField *field)
7199 if (strcmp (field->name, "tlsdata"))
7201 if (strcmp (field->parent->name, "ThreadLocal`1"))
7203 return field->parent->image == mono_defaults.corlib;
7206 /* emits the code needed to access a managed tls var (like ThreadStatic)
7207 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7208 * pointer for the current thread.
7209 * Returns the MonoInst* representing the address of the tls var.
7212 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7215 int static_data_reg, array_reg, dreg;
7216 int offset2_reg, idx_reg;
7217 // inlined access to the tls data
7218 // idx = (offset >> 24) - 1;
7219 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7220 static_data_reg = alloc_ireg (cfg);
7221 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7222 idx_reg = alloc_ireg (cfg);
7223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7225 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7226 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7227 array_reg = alloc_ireg (cfg);
7228 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7229 offset2_reg = alloc_ireg (cfg);
7230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7231 dreg = alloc_ireg (cfg);
7232 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7237 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7238 * this address is cached per-method in cached_tls_addr.
7241 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7243 MonoInst *load, *addr, *temp, *store, *thread_ins;
7244 MonoClassField *offset_field;
7246 if (*cached_tls_addr) {
7247 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7250 thread_ins = mono_get_thread_intrinsic (cfg);
7251 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7253 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7255 MONO_ADD_INS (cfg->cbb, thread_ins);
7257 MonoMethod *thread_method;
7258 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7259 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7261 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7262 addr->klass = mono_class_from_mono_type (tls_field->type);
7263 addr->type = STACK_MP;
7264 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7265 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7267 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7274 * Handle calls made to ctors from NEWOBJ opcodes.
7276 * REF_BBLOCK will point to the current bblock after the call.
7279 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7280 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7282 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7283 MonoBasicBlock *bblock = *ref_bblock;
7285 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7286 mono_method_is_generic_sharable (cmethod, TRUE)) {
7287 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7288 mono_class_vtable (cfg->domain, cmethod->klass);
7289 CHECK_TYPELOAD (cmethod->klass);
7291 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7292 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7295 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7296 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7298 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7300 CHECK_TYPELOAD (cmethod->klass);
7301 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7306 /* Avoid virtual calls to ctors if possible */
7307 if (mono_class_is_marshalbyref (cmethod->klass))
7308 callvirt_this_arg = sp [0];
7310 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7311 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7312 CHECK_CFG_EXCEPTION;
7313 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7314 mono_method_check_inlining (cfg, cmethod) &&
7315 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7318 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7319 cfg->real_offset += 5;
7321 *inline_costs += costs - 5;
7322 *ref_bblock = bblock;
7324 INLINE_FAILURE ("inline failure");
7325 // FIXME-VT: Clean this up
7326 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7327 GSHAREDVT_FAILURE(*ip);
7328 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7330 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7333 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7334 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7335 } else if (context_used &&
7336 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7337 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7338 MonoInst *cmethod_addr;
7340 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7342 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7343 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7345 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7347 INLINE_FAILURE ("ctor call");
7348 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7349 callvirt_this_arg, NULL, vtable_arg);
7356 * mono_method_to_ir:
7358 * Translate the .net IL into linear IR.
7361 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7362 MonoInst *return_var, MonoInst **inline_args,
7363 guint inline_offset, gboolean is_virtual_call)
7366 MonoInst *ins, **sp, **stack_start;
7367 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7368 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7369 MonoMethod *cmethod, *method_definition;
7370 MonoInst **arg_array;
7371 MonoMethodHeader *header;
7373 guint32 token, ins_flag;
7375 MonoClass *constrained_call = NULL;
7376 unsigned char *ip, *end, *target, *err_pos;
7377 MonoMethodSignature *sig;
7378 MonoGenericContext *generic_context = NULL;
7379 MonoGenericContainer *generic_container = NULL;
7380 MonoType **param_types;
7381 int i, n, start_new_bblock, dreg;
7382 int num_calls = 0, inline_costs = 0;
7383 int breakpoint_id = 0;
7385 MonoBoolean security, pinvoke;
7386 MonoSecurityManager* secman = NULL;
7387 MonoDeclSecurityActions actions;
7388 GSList *class_inits = NULL;
7389 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7391 gboolean init_locals, seq_points, skip_dead_blocks;
7392 gboolean sym_seq_points = FALSE;
7393 MonoInst *cached_tls_addr = NULL;
7394 MonoDebugMethodInfo *minfo;
7395 MonoBitSet *seq_point_locs = NULL;
7396 MonoBitSet *seq_point_set_locs = NULL;
7398 cfg->disable_inline = is_jit_optimizer_disabled (method);
7400 /* serialization and xdomain stuff may need access to private fields and methods */
7401 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7402 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7403 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7404 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7405 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7406 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7408 dont_verify |= mono_security_smcs_hack_enabled ();
7410 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7411 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7412 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7413 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7414 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7416 image = method->klass->image;
7417 header = mono_method_get_header (method);
7419 MonoLoaderError *error;
7421 if ((error = mono_loader_get_last_error ())) {
7422 mono_cfg_set_exception (cfg, error->exception_type);
7424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7425 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7427 goto exception_exit;
7429 generic_container = mono_method_get_generic_container (method);
7430 sig = mono_method_signature (method);
7431 num_args = sig->hasthis + sig->param_count;
7432 ip = (unsigned char*)header->code;
7433 cfg->cil_start = ip;
7434 end = ip + header->code_size;
7435 cfg->stat_cil_code_size += header->code_size;
7437 seq_points = cfg->gen_seq_points && cfg->method == method;
7438 #ifdef PLATFORM_ANDROID
7439 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7442 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7443 /* We could hit a seq point before attaching to the JIT (#8338) */
7447 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7448 minfo = mono_debug_lookup_method (method);
7450 int i, n_il_offsets;
7454 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7455 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7456 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7457 sym_seq_points = TRUE;
7458 for (i = 0; i < n_il_offsets; ++i) {
7459 if (il_offsets [i] < header->code_size)
7460 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7462 g_free (il_offsets);
7463 g_free (line_numbers);
7464 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7465 /* Methods without line number info like auto-generated property accessors */
7466 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7467 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7468 sym_seq_points = TRUE;
7473 * Methods without init_locals set could cause asserts in various passes
7474 * (#497220). To work around this, we emit dummy initialization opcodes
7475 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7476 * on some platforms.
7478 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7479 init_locals = header->init_locals;
7483 method_definition = method;
7484 while (method_definition->is_inflated) {
7485 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7486 method_definition = imethod->declaring;
7489 /* SkipVerification is not allowed if core-clr is enabled */
7490 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7492 dont_verify_stloc = TRUE;
7495 if (sig->is_inflated)
7496 generic_context = mono_method_get_context (method);
7497 else if (generic_container)
7498 generic_context = &generic_container->context;
7499 cfg->generic_context = generic_context;
7501 if (!cfg->generic_sharing_context)
7502 g_assert (!sig->has_type_parameters);
7504 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7505 g_assert (method->is_inflated);
7506 g_assert (mono_method_get_context (method)->method_inst);
7508 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7509 g_assert (sig->generic_param_count);
7511 if (cfg->method == method) {
7512 cfg->real_offset = 0;
7514 cfg->real_offset = inline_offset;
7517 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7518 cfg->cil_offset_to_bb_len = header->code_size;
7520 cfg->current_method = method;
7522 if (cfg->verbose_level > 2)
7523 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7525 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7527 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7528 for (n = 0; n < sig->param_count; ++n)
7529 param_types [n + sig->hasthis] = sig->params [n];
7530 cfg->arg_types = param_types;
7532 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7533 if (cfg->method == method) {
7535 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7536 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7539 NEW_BBLOCK (cfg, start_bblock);
7540 cfg->bb_entry = start_bblock;
7541 start_bblock->cil_code = NULL;
7542 start_bblock->cil_length = 0;
7543 #if defined(__native_client_codegen__)
7544 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7545 ins->dreg = alloc_dreg (cfg, STACK_I4);
7546 MONO_ADD_INS (start_bblock, ins);
7550 NEW_BBLOCK (cfg, end_bblock);
7551 cfg->bb_exit = end_bblock;
7552 end_bblock->cil_code = NULL;
7553 end_bblock->cil_length = 0;
7554 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7555 g_assert (cfg->num_bblocks == 2);
7557 arg_array = cfg->args;
7559 if (header->num_clauses) {
7560 cfg->spvars = g_hash_table_new (NULL, NULL);
7561 cfg->exvars = g_hash_table_new (NULL, NULL);
7563 /* handle exception clauses */
7564 for (i = 0; i < header->num_clauses; ++i) {
7565 MonoBasicBlock *try_bb;
7566 MonoExceptionClause *clause = &header->clauses [i];
7567 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7568 try_bb->real_offset = clause->try_offset;
7569 try_bb->try_start = TRUE;
7570 try_bb->region = ((i + 1) << 8) | clause->flags;
7571 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7572 tblock->real_offset = clause->handler_offset;
7573 tblock->flags |= BB_EXCEPTION_HANDLER;
7576 * Linking the try block with the EH block hinders inlining as we won't be able to
7577 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7579 if (COMPILE_LLVM (cfg))
7580 link_bblock (cfg, try_bb, tblock);
7582 if (*(ip + clause->handler_offset) == CEE_POP)
7583 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7585 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7586 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7587 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7588 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7589 MONO_ADD_INS (tblock, ins);
7591 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7592 /* finally clauses already have a seq point */
7593 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7594 MONO_ADD_INS (tblock, ins);
7597 /* todo: is a fault block unsafe to optimize? */
7598 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7599 tblock->flags |= BB_EXCEPTION_UNSAFE;
7603 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7605 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7607 /* catch and filter blocks get the exception object on the stack */
7608 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7609 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7610 MonoInst *dummy_use;
7612 /* mostly like handle_stack_args (), but just sets the input args */
7613 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7614 tblock->in_scount = 1;
7615 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7616 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7619 * Add a dummy use for the exvar so its liveness info will be
7623 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7625 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7626 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7627 tblock->flags |= BB_EXCEPTION_HANDLER;
7628 tblock->real_offset = clause->data.filter_offset;
7629 tblock->in_scount = 1;
7630 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7631 /* The filter block shares the exvar with the handler block */
7632 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7633 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7634 MONO_ADD_INS (tblock, ins);
7638 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7639 clause->data.catch_class &&
7640 cfg->generic_sharing_context &&
7641 mono_class_check_context_used (clause->data.catch_class)) {
7643 * In shared generic code with catch
7644 * clauses containing type variables
7645 * the exception handling code has to
7646 * be able to get to the rgctx.
7647 * Therefore we have to make sure that
7648 * the vtable/mrgctx argument (for
7649 * static or generic methods) or the
7650 * "this" argument (for non-static
7651 * methods) are live.
7653 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7654 mini_method_get_context (method)->method_inst ||
7655 method->klass->valuetype) {
7656 mono_get_vtable_var (cfg);
7658 MonoInst *dummy_use;
7660 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7665 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7666 cfg->cbb = start_bblock;
7667 cfg->args = arg_array;
7668 mono_save_args (cfg, sig, inline_args);
7671 /* FIRST CODE BLOCK */
7672 NEW_BBLOCK (cfg, bblock);
7673 bblock->cil_code = ip;
7677 ADD_BBLOCK (cfg, bblock);
7679 if (cfg->method == method) {
7680 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7681 if (breakpoint_id) {
7682 MONO_INST_NEW (cfg, ins, OP_BREAK);
7683 MONO_ADD_INS (bblock, ins);
7687 if (mono_security_cas_enabled ())
7688 secman = mono_security_manager_get_methods ();
7690 security = (secman && mono_security_method_has_declsec (method));
7691 /* at this point having security doesn't mean we have any code to generate */
7692 if (security && (cfg->method == method)) {
7693 /* Only Demand, NonCasDemand and DemandChoice require code generation.
7694 * And we do not want to enter the next section (with allocation) if we
7695 * have nothing to generate */
7696 security = mono_declsec_get_demands (method, &actions);
7699 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7700 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7702 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7703 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7704 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7706 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
7707 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7711 mono_custom_attrs_free (custom);
7714 custom = mono_custom_attrs_from_class (wrapped->klass);
7715 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7719 mono_custom_attrs_free (custom);
7722 /* not a P/Invoke after all */
7727 /* we use a separate basic block for the initialization code */
7728 NEW_BBLOCK (cfg, init_localsbb);
7729 cfg->bb_init = init_localsbb;
7730 init_localsbb->real_offset = cfg->real_offset;
7731 start_bblock->next_bb = init_localsbb;
7732 init_localsbb->next_bb = bblock;
7733 link_bblock (cfg, start_bblock, init_localsbb);
7734 link_bblock (cfg, init_localsbb, bblock);
7736 cfg->cbb = init_localsbb;
7738 if (cfg->gsharedvt && cfg->method == method) {
7739 MonoGSharedVtMethodInfo *info;
7740 MonoInst *var, *locals_var;
7743 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7744 info->method = cfg->method;
7745 info->count_entries = 16;
7746 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7747 cfg->gsharedvt_info = info;
7749 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7750 /* prevent it from being register allocated */
7751 //var->flags |= MONO_INST_VOLATILE;
7752 cfg->gsharedvt_info_var = var;
7754 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7755 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7757 /* Allocate locals */
7758 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7759 /* prevent it from being register allocated */
7760 //locals_var->flags |= MONO_INST_VOLATILE;
7761 cfg->gsharedvt_locals_var = locals_var;
7763 dreg = alloc_ireg (cfg);
7764 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7766 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7767 ins->dreg = locals_var->dreg;
7769 MONO_ADD_INS (cfg->cbb, ins);
7770 cfg->gsharedvt_locals_var_ins = ins;
7772 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7775 ins->flags |= MONO_INST_INIT;
7779 /* at this point we know, if security is TRUE, that some code needs to be generated */
7780 if (security && (cfg->method == method)) {
7783 cfg->stat_cas_demand_generation++;
7785 if (actions.demand.blob) {
7786 /* Add code for SecurityAction.Demand */
7787 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7788 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7789 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7790 mono_emit_method_call (cfg, secman->demand, args, NULL);
7792 if (actions.noncasdemand.blob) {
7793 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7794 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7795 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7796 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7797 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7798 mono_emit_method_call (cfg, secman->demand, args, NULL);
7800 if (actions.demandchoice.blob) {
7801 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7802 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7803 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7804 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7805 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7809 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7811 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7814 if (mono_security_core_clr_enabled ()) {
7815 /* check if this is native code, e.g. an icall or a p/invoke */
7816 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7817 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7819 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7820 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7822 /* if this is a native call then it can only be JITted from platform code */
7823 if ((icall || pinvk) && method->klass && method->klass->image) {
7824 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7825 MonoException *ex = icall ? mono_get_exception_security () :
7826 mono_get_exception_method_access ();
7827 emit_throw_exception (cfg, ex);
7834 CHECK_CFG_EXCEPTION;
7836 if (header->code_size == 0)
7839 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7844 if (cfg->method == method)
7845 mono_debug_init_method (cfg, bblock, breakpoint_id);
7847 for (n = 0; n < header->num_locals; ++n) {
7848 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7853 /* We force the vtable variable here for all shared methods
7854 for the possibility that they might show up in a stack
7855 trace where their exact instantiation is needed. */
7856 if (cfg->generic_sharing_context && method == cfg->method) {
7857 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7858 mini_method_get_context (method)->method_inst ||
7859 method->klass->valuetype) {
7860 mono_get_vtable_var (cfg);
7862 /* FIXME: Is there a better way to do this?
7863 We need the variable live for the duration
7864 of the whole method. */
7865 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7869 /* add a check for this != NULL to inlined methods */
7870 if (is_virtual_call) {
7873 NEW_ARGLOAD (cfg, arg_ins, 0);
7874 MONO_ADD_INS (cfg->cbb, arg_ins);
7875 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7878 skip_dead_blocks = !dont_verify;
7879 if (skip_dead_blocks) {
7880 original_bb = bb = mono_basic_block_split (method, &cfg->error);
7885 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7886 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7889 start_new_bblock = 0;
7892 if (cfg->method == method)
7893 cfg->real_offset = ip - header->code;
7895 cfg->real_offset = inline_offset;
7900 if (start_new_bblock) {
7901 bblock->cil_length = ip - bblock->cil_code;
7902 if (start_new_bblock == 2) {
7903 g_assert (ip == tblock->cil_code);
7905 GET_BBLOCK (cfg, tblock, ip);
7907 bblock->next_bb = tblock;
7910 start_new_bblock = 0;
7911 for (i = 0; i < bblock->in_scount; ++i) {
7912 if (cfg->verbose_level > 3)
7913 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7914 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7918 g_slist_free (class_inits);
7921 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7922 link_bblock (cfg, bblock, tblock);
7923 if (sp != stack_start) {
7924 handle_stack_args (cfg, stack_start, sp - stack_start);
7926 CHECK_UNVERIFIABLE (cfg);
7928 bblock->next_bb = tblock;
7931 for (i = 0; i < bblock->in_scount; ++i) {
7932 if (cfg->verbose_level > 3)
7933 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7934 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7937 g_slist_free (class_inits);
7942 if (skip_dead_blocks) {
7943 int ip_offset = ip - header->code;
7945 if (ip_offset == bb->end)
7949 int op_size = mono_opcode_size (ip, end);
7950 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7952 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7954 if (ip_offset + op_size == bb->end) {
7955 MONO_INST_NEW (cfg, ins, OP_NOP);
7956 MONO_ADD_INS (bblock, ins);
7957 start_new_bblock = 1;
7965 * Sequence points are points where the debugger can place a breakpoint.
7966 * Currently, we generate these automatically at points where the IL
7969 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7971 * Make methods interruptable at the beginning, and at the targets of
7972 * backward branches.
7973 * Also, do this at the start of every bblock in methods with clauses too,
7974 * to be able to handle instructions with imprecise control flow like
7976 * Backward branches are handled at the end of method-to-ir ().
7978 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7980 /* Avoid sequence points on empty IL like .volatile */
7981 // FIXME: Enable this
7982 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7983 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7984 if (sp != stack_start)
7985 ins->flags |= MONO_INST_NONEMPTY_STACK;
7986 MONO_ADD_INS (cfg->cbb, ins);
7989 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7992 bblock->real_offset = cfg->real_offset;
7994 if ((cfg->method == method) && cfg->coverage_info) {
7995 guint32 cil_offset = ip - header->code;
7996 cfg->coverage_info->data [cil_offset].cil_code = ip;
7998 /* TODO: Use an increment here */
7999 #if defined(TARGET_X86)
8000 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8001 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8003 MONO_ADD_INS (cfg->cbb, ins);
8005 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8006 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8010 if (cfg->verbose_level > 3)
8011 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8015 if (seq_points && !sym_seq_points && sp != stack_start) {
8017 * The C# compiler uses these nops to notify the JIT that it should
8018 * insert seq points.
8020 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8021 MONO_ADD_INS (cfg->cbb, ins);
8023 if (cfg->keep_cil_nops)
8024 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8026 MONO_INST_NEW (cfg, ins, OP_NOP);
8028 MONO_ADD_INS (bblock, ins);
8031 if (should_insert_brekpoint (cfg->method)) {
8032 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8034 MONO_INST_NEW (cfg, ins, OP_NOP);
8037 MONO_ADD_INS (bblock, ins);
8043 CHECK_STACK_OVF (1);
8044 n = (*ip)-CEE_LDARG_0;
8046 EMIT_NEW_ARGLOAD (cfg, ins, n);
8054 CHECK_STACK_OVF (1);
8055 n = (*ip)-CEE_LDLOC_0;
8057 EMIT_NEW_LOCLOAD (cfg, ins, n);
8066 n = (*ip)-CEE_STLOC_0;
8069 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8071 emit_stloc_ir (cfg, sp, header, n);
8078 CHECK_STACK_OVF (1);
8081 EMIT_NEW_ARGLOAD (cfg, ins, n);
8087 CHECK_STACK_OVF (1);
8090 NEW_ARGLOADA (cfg, ins, n);
8091 MONO_ADD_INS (cfg->cbb, ins);
8101 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8103 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8108 CHECK_STACK_OVF (1);
8111 EMIT_NEW_LOCLOAD (cfg, ins, n);
8115 case CEE_LDLOCA_S: {
8116 unsigned char *tmp_ip;
8118 CHECK_STACK_OVF (1);
8119 CHECK_LOCAL (ip [1]);
8121 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8127 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8136 CHECK_LOCAL (ip [1]);
8137 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8139 emit_stloc_ir (cfg, sp, header, ip [1]);
8144 CHECK_STACK_OVF (1);
8145 EMIT_NEW_PCONST (cfg, ins, NULL);
8146 ins->type = STACK_OBJ;
8151 CHECK_STACK_OVF (1);
8152 EMIT_NEW_ICONST (cfg, ins, -1);
8165 CHECK_STACK_OVF (1);
8166 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8172 CHECK_STACK_OVF (1);
8174 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8180 CHECK_STACK_OVF (1);
8181 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8187 CHECK_STACK_OVF (1);
8188 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8189 ins->type = STACK_I8;
8190 ins->dreg = alloc_dreg (cfg, STACK_I8);
8192 ins->inst_l = (gint64)read64 (ip);
8193 MONO_ADD_INS (bblock, ins);
8199 gboolean use_aotconst = FALSE;
8201 #ifdef TARGET_POWERPC
8202 /* FIXME: Clean this up */
8203 if (cfg->compile_aot)
8204 use_aotconst = TRUE;
8207 /* FIXME: we should really allocate this only late in the compilation process */
8208 f = mono_domain_alloc (cfg->domain, sizeof (float));
8210 CHECK_STACK_OVF (1);
8216 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8218 dreg = alloc_freg (cfg);
8219 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8220 ins->type = STACK_R8;
8222 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8223 ins->type = STACK_R8;
8224 ins->dreg = alloc_dreg (cfg, STACK_R8);
8226 MONO_ADD_INS (bblock, ins);
8236 gboolean use_aotconst = FALSE;
8238 #ifdef TARGET_POWERPC
8239 /* FIXME: Clean this up */
8240 if (cfg->compile_aot)
8241 use_aotconst = TRUE;
8244 /* FIXME: we should really allocate this only late in the compilation process */
8245 d = mono_domain_alloc (cfg->domain, sizeof (double));
8247 CHECK_STACK_OVF (1);
8253 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8255 dreg = alloc_freg (cfg);
8256 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8257 ins->type = STACK_R8;
8259 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8260 ins->type = STACK_R8;
8261 ins->dreg = alloc_dreg (cfg, STACK_R8);
8263 MONO_ADD_INS (bblock, ins);
8272 MonoInst *temp, *store;
8274 CHECK_STACK_OVF (1);
8278 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8279 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8281 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8284 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8297 if (sp [0]->type == STACK_R8)
8298 /* we need to pop the value from the x86 FP stack */
8299 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8305 INLINE_FAILURE ("jmp");
8306 GSHAREDVT_FAILURE (*ip);
8309 if (stack_start != sp)
8311 token = read32 (ip + 1);
8312 /* FIXME: check the signature matches */
8313 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8315 if (!cmethod || mono_loader_get_last_error ())
8318 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8319 GENERIC_SHARING_FAILURE (CEE_JMP);
8321 if (mono_security_cas_enabled ())
8322 CHECK_CFG_EXCEPTION;
8324 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8326 if (ARCH_HAVE_OP_TAIL_CALL) {
8327 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8330 /* Handle tail calls similarly to calls */
8331 n = fsig->param_count + fsig->hasthis;
8335 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8336 call->method = cmethod;
8337 call->tail_call = TRUE;
8338 call->signature = mono_method_signature (cmethod);
8339 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8340 call->inst.inst_p0 = cmethod;
8341 for (i = 0; i < n; ++i)
8342 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8344 mono_arch_emit_call (cfg, call);
8345 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8346 MONO_ADD_INS (bblock, (MonoInst*)call);
8348 for (i = 0; i < num_args; ++i)
8349 /* Prevent arguments from being optimized away */
8350 arg_array [i]->flags |= MONO_INST_VOLATILE;
8352 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8353 ins = (MonoInst*)call;
8354 ins->inst_p0 = cmethod;
8355 MONO_ADD_INS (bblock, ins);
8359 start_new_bblock = 1;
8364 case CEE_CALLVIRT: {
8365 MonoInst *addr = NULL;
8366 MonoMethodSignature *fsig = NULL;
8368 int virtual = *ip == CEE_CALLVIRT;
8369 int calli = *ip == CEE_CALLI;
8370 gboolean pass_imt_from_rgctx = FALSE;
8371 MonoInst *imt_arg = NULL;
8372 MonoInst *keep_this_alive = NULL;
8373 gboolean pass_vtable = FALSE;
8374 gboolean pass_mrgctx = FALSE;
8375 MonoInst *vtable_arg = NULL;
8376 gboolean check_this = FALSE;
8377 gboolean supported_tail_call = FALSE;
8378 gboolean tail_call = FALSE;
8379 gboolean need_seq_point = FALSE;
8380 guint32 call_opcode = *ip;
8381 gboolean emit_widen = TRUE;
8382 gboolean push_res = TRUE;
8383 gboolean skip_ret = FALSE;
8384 gboolean delegate_invoke = FALSE;
8387 token = read32 (ip + 1);
8392 //GSHAREDVT_FAILURE (*ip);
8397 fsig = mini_get_signature (method, token, generic_context);
8398 n = fsig->param_count + fsig->hasthis;
8400 if (method->dynamic && fsig->pinvoke) {
8404 * This is a call through a function pointer using a pinvoke
8405 * signature. Have to create a wrapper and call that instead.
8406 * FIXME: This is very slow, need to create a wrapper at JIT time
8407 * instead based on the signature.
8409 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8410 EMIT_NEW_PCONST (cfg, args [1], fsig);
8412 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8415 MonoMethod *cil_method;
8417 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8418 cil_method = cmethod;
8420 if (constrained_call) {
8421 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8422 if (cfg->verbose_level > 2)
8423 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8424 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8425 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8426 cfg->generic_sharing_context)) {
8427 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8431 if (cfg->verbose_level > 2)
8432 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8434 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8436 * This is needed since get_method_constrained can't find
8437 * the method in klass representing a type var.
8438 * The type var is guaranteed to be a reference type in this
8441 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8442 g_assert (!cmethod->klass->valuetype);
8444 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8450 if (!cmethod || mono_loader_get_last_error ())
8452 if (!dont_verify && !cfg->skip_visibility) {
8453 MonoMethod *target_method = cil_method;
8454 if (method->is_inflated) {
8455 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8457 if (!mono_method_can_access_method (method_definition, target_method) &&
8458 !mono_method_can_access_method (method, cil_method))
8459 METHOD_ACCESS_FAILURE (method, cil_method);
8462 if (mono_security_core_clr_enabled ())
8463 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8465 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8466 /* MS.NET seems to silently convert this to a callvirt */
8471 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8472 * converts to a callvirt.
8474 * tests/bug-515884.il is an example of this behavior
8476 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8477 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8478 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8482 if (!cmethod->klass->inited)
8483 if (!mono_class_init (cmethod->klass))
8484 TYPE_LOAD_ERROR (cmethod->klass);
8486 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8487 mini_class_is_system_array (cmethod->klass)) {
8488 array_rank = cmethod->klass->rank;
8489 fsig = mono_method_signature (cmethod);
8491 fsig = mono_method_signature (cmethod);
8496 if (fsig->pinvoke) {
8497 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8498 check_for_pending_exc, cfg->compile_aot);
8499 fsig = mono_method_signature (wrapper);
8500 } else if (constrained_call) {
8501 fsig = mono_method_signature (cmethod);
8503 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8508 mono_save_token_info (cfg, image, token, cil_method);
8510 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8511 need_seq_point = TRUE;
8513 n = fsig->param_count + fsig->hasthis;
8515 /* Don't support calls made using type arguments for now */
8517 if (cfg->gsharedvt) {
8518 if (mini_is_gsharedvt_signature (cfg, fsig))
8519 GSHAREDVT_FAILURE (*ip);
8523 if (mono_security_cas_enabled ()) {
8524 if (check_linkdemand (cfg, method, cmethod))
8525 INLINE_FAILURE ("linkdemand");
8526 CHECK_CFG_EXCEPTION;
8529 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8530 g_assert_not_reached ();
8533 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8536 if (!cfg->generic_sharing_context && cmethod)
8537 g_assert (!mono_method_check_context_used (cmethod));
8541 //g_assert (!virtual || fsig->hasthis);
8545 if (constrained_call) {
8546 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8548 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8550 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8551 /* The 'Own method' case below */
8552 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8553 /* 'The type parameter is instantiated as a reference type' case below. */
8554 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8555 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8556 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8557 MonoInst *args [16];
8560 * This case handles calls to
8561 * - object:ToString()/Equals()/GetHashCode(),
8562 * - System.IComparable<T>:CompareTo()
8563 * - System.IEquatable<T>:Equals ()
8564 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8568 if (mono_method_check_context_used (cmethod))
8569 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8571 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8572 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8574 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8575 if (fsig->hasthis && fsig->param_count) {
8576 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8577 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8578 ins->dreg = alloc_preg (cfg);
8579 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8580 MONO_ADD_INS (cfg->cbb, ins);
8583 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8586 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8588 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8589 addr_reg = ins->dreg;
8590 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8592 EMIT_NEW_ICONST (cfg, args [3], 0);
8593 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8596 EMIT_NEW_ICONST (cfg, args [3], 0);
8597 EMIT_NEW_ICONST (cfg, args [4], 0);
8599 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8602 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8603 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8604 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8608 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8609 MONO_ADD_INS (cfg->cbb, add);
8611 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8612 MONO_ADD_INS (cfg->cbb, ins);
8613 /* ins represents the call result */
8618 GSHAREDVT_FAILURE (*ip);
8622 * We have the `constrained.' prefix opcode.
8624 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8626 * The type parameter is instantiated as a valuetype,
8627 * but that type doesn't override the method we're
8628 * calling, so we need to box `this'.
8630 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8631 ins->klass = constrained_call;
8632 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8633 CHECK_CFG_EXCEPTION;
8634 } else if (!constrained_call->valuetype) {
8635 int dreg = alloc_ireg_ref (cfg);
8638 * The type parameter is instantiated as a reference
8639 * type. We have a managed pointer on the stack, so
8640 * we need to dereference it here.
8642 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8643 ins->type = STACK_OBJ;
8646 if (cmethod->klass->valuetype) {
8649 /* Interface method */
8652 mono_class_setup_vtable (constrained_call);
8653 CHECK_TYPELOAD (constrained_call);
8654 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8656 TYPE_LOAD_ERROR (constrained_call);
8657 slot = mono_method_get_vtable_slot (cmethod);
8659 TYPE_LOAD_ERROR (cmethod->klass);
8660 cmethod = constrained_call->vtable [ioffset + slot];
8662 if (cmethod->klass == mono_defaults.enum_class) {
8663 /* Enum implements some interfaces, so treat this as the first case */
8664 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8665 ins->klass = constrained_call;
8666 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8667 CHECK_CFG_EXCEPTION;
8672 constrained_call = NULL;
8675 if (!calli && check_call_signature (cfg, fsig, sp))
8678 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8679 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8680 delegate_invoke = TRUE;
8683 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8685 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8686 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8694 * If the callee is a shared method, then its static cctor
8695 * might not get called after the call was patched.
8697 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8698 emit_generic_class_init (cfg, cmethod->klass);
8699 CHECK_TYPELOAD (cmethod->klass);
8703 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8705 if (cfg->generic_sharing_context && cmethod) {
8706 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8708 context_used = mini_method_check_context_used (cfg, cmethod);
8710 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8711 /* Generic method interface
8712 calls are resolved via a
8713 helper function and don't
8715 if (!cmethod_context || !cmethod_context->method_inst)
8716 pass_imt_from_rgctx = TRUE;
8720 * If a shared method calls another
8721 * shared method then the caller must
8722 * have a generic sharing context
8723 * because the magic trampoline
8724 * requires it. FIXME: We shouldn't
8725 * have to force the vtable/mrgctx
8726 * variable here. Instead there
8727 * should be a flag in the cfg to
8728 * request a generic sharing context.
8731 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8732 mono_get_vtable_var (cfg);
8737 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8739 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8741 CHECK_TYPELOAD (cmethod->klass);
8742 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8747 g_assert (!vtable_arg);
8749 if (!cfg->compile_aot) {
8751 * emit_get_rgctx_method () calls mono_class_vtable () so check
8752 * for type load errors before.
8754 mono_class_setup_vtable (cmethod->klass);
8755 CHECK_TYPELOAD (cmethod->klass);
8758 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8760 /* !marshalbyref is needed to properly handle generic methods + remoting */
8761 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8762 MONO_METHOD_IS_FINAL (cmethod)) &&
8763 !mono_class_is_marshalbyref (cmethod->klass)) {
8770 if (pass_imt_from_rgctx) {
8771 g_assert (!pass_vtable);
8774 imt_arg = emit_get_rgctx_method (cfg, context_used,
8775 cmethod, MONO_RGCTX_INFO_METHOD);
8779 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8781 /* Calling virtual generic methods */
8782 if (cmethod && virtual &&
8783 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8784 !(MONO_METHOD_IS_FINAL (cmethod) &&
8785 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8786 fsig->generic_param_count &&
8787 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8788 MonoInst *this_temp, *this_arg_temp, *store;
8789 MonoInst *iargs [4];
8790 gboolean use_imt = FALSE;
8792 g_assert (fsig->is_inflated);
8794 /* Prevent inlining of methods that contain indirect calls */
8795 INLINE_FAILURE ("virtual generic call");
8797 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8798 GSHAREDVT_FAILURE (*ip);
8800 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8801 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8806 g_assert (!imt_arg);
8808 g_assert (cmethod->is_inflated);
8809 imt_arg = emit_get_rgctx_method (cfg, context_used,
8810 cmethod, MONO_RGCTX_INFO_METHOD);
8811 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8813 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8814 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8815 MONO_ADD_INS (bblock, store);
8817 /* FIXME: This should be a managed pointer */
8818 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8820 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8821 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8822 cmethod, MONO_RGCTX_INFO_METHOD);
8823 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8824 addr = mono_emit_jit_icall (cfg,
8825 mono_helper_compile_generic_method, iargs);
8827 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8829 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8836 * Implement a workaround for the inherent races involved in locking:
8842 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8843 * try block, the Exit () won't be executed, see:
8844 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8845 * To work around this, we extend such try blocks to include the last x bytes
8846 * of the Monitor.Enter () call.
8848 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8849 MonoBasicBlock *tbb;
8851 GET_BBLOCK (cfg, tbb, ip + 5);
8853 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8854 * from Monitor.Enter like ArgumentNullException.
8856 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8857 /* Mark this bblock as needing to be extended */
8858 tbb->extend_try_block = TRUE;
8862 /* Conversion to a JIT intrinsic */
8863 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8865 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8866 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8873 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8874 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8875 mono_method_check_inlining (cfg, cmethod)) {
8877 gboolean always = FALSE;
8879 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8880 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8881 /* Prevent inlining of methods that call wrappers */
8882 INLINE_FAILURE ("wrapper call");
8883 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8887 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8889 cfg->real_offset += 5;
8891 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8892 /* *sp is already set by inline_method */
8897 inline_costs += costs;
8903 /* Tail recursion elimination */
8904 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8905 gboolean has_vtargs = FALSE;
8908 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8909 INLINE_FAILURE ("tail call");
8911 /* keep it simple */
8912 for (i = fsig->param_count - 1; i >= 0; i--) {
8913 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8918 for (i = 0; i < n; ++i)
8919 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8920 MONO_INST_NEW (cfg, ins, OP_BR);
8921 MONO_ADD_INS (bblock, ins);
8922 tblock = start_bblock->out_bb [0];
8923 link_bblock (cfg, bblock, tblock);
8924 ins->inst_target_bb = tblock;
8925 start_new_bblock = 1;
8927 /* skip the CEE_RET, too */
8928 if (ip_in_bb (cfg, bblock, ip + 5))
8935 inline_costs += 10 * num_calls++;
8938 * Making generic calls out of gsharedvt methods.
8939 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8940 * patching gshared method addresses into a gsharedvt method.
8942 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8943 MonoRgctxInfoType info_type;
8946 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8947 //GSHAREDVT_FAILURE (*ip);
8948 // disable for possible remoting calls
8949 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8950 GSHAREDVT_FAILURE (*ip);
8951 if (fsig->generic_param_count) {
8952 /* virtual generic call */
8953 g_assert (mono_use_imt);
8954 g_assert (!imt_arg);
8955 /* Same as the virtual generic case above */
8956 imt_arg = emit_get_rgctx_method (cfg, context_used,
8957 cmethod, MONO_RGCTX_INFO_METHOD);
8958 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8960 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8961 /* This can happen when we call a fully instantiated iface method */
8962 imt_arg = emit_get_rgctx_method (cfg, context_used,
8963 cmethod, MONO_RGCTX_INFO_METHOD);
8968 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8969 /* test_0_multi_dim_arrays () in gshared.cs */
8970 GSHAREDVT_FAILURE (*ip);
8972 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8973 keep_this_alive = sp [0];
8975 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8976 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8978 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8979 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8981 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8983 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8985 * We pass the address to the gsharedvt trampoline in the rgctx reg
8987 MonoInst *callee = addr;
8989 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8991 GSHAREDVT_FAILURE (*ip);
8993 addr = emit_get_rgctx_sig (cfg, context_used,
8994 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8995 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8999 /* Generic sharing */
9002 * Use this if the callee is gsharedvt sharable too, since
9003 * at runtime we might find an instantiation so the call cannot
9004 * be patched (the 'no_patch' code path in mini-trampolines.c).
9006 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9007 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9008 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9009 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9010 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9011 INLINE_FAILURE ("gshared");
9013 g_assert (cfg->generic_sharing_context && cmethod);
9017 * We are compiling a call to a
9018 * generic method from shared code,
9019 * which means that we have to look up
9020 * the method in the rgctx and do an
9024 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9026 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9027 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9031 /* Indirect calls */
9033 if (call_opcode == CEE_CALL)
9034 g_assert (context_used);
9035 else if (call_opcode == CEE_CALLI)
9036 g_assert (!vtable_arg);
9038 /* FIXME: what the hell is this??? */
9039 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
9040 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
9042 /* Prevent inlining of methods with indirect calls */
9043 INLINE_FAILURE ("indirect call");
9045 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9050 * Instead of emitting an indirect call, emit a direct call
9051 * with the contents of the aotconst as the patch info.
9053 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9054 info_type = addr->inst_c1;
9055 info_data = addr->inst_p0;
9057 info_type = addr->inst_right->inst_c1;
9058 info_data = addr->inst_right->inst_left;
9061 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9062 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9067 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9075 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9076 MonoInst *val = sp [fsig->param_count];
9078 if (val->type == STACK_OBJ) {
9079 MonoInst *iargs [2];
9084 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9087 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9088 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9089 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9090 emit_write_barrier (cfg, addr, val);
9091 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9092 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9094 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9095 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9096 if (!cmethod->klass->element_class->valuetype && !readonly)
9097 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9098 CHECK_TYPELOAD (cmethod->klass);
9101 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9104 g_assert_not_reached ();
9111 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9115 /* Tail prefix / tail call optimization */
9117 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9118 /* FIXME: runtime generic context pointer for jumps? */
9119 /* FIXME: handle this for generic sharing eventually */
9120 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
9121 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9122 supported_tail_call = TRUE;
9124 if (supported_tail_call) {
9127 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9128 INLINE_FAILURE ("tail call");
9130 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9132 if (ARCH_HAVE_OP_TAIL_CALL) {
9133 /* Handle tail calls similarly to normal calls */
9136 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9138 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9139 call->tail_call = TRUE;
9140 call->method = cmethod;
9141 call->signature = mono_method_signature (cmethod);
9144 * We implement tail calls by storing the actual arguments into the
9145 * argument variables, then emitting a CEE_JMP.
9147 for (i = 0; i < n; ++i) {
9148 /* Prevent argument from being register allocated */
9149 arg_array [i]->flags |= MONO_INST_VOLATILE;
9150 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9152 ins = (MonoInst*)call;
9153 ins->inst_p0 = cmethod;
9154 ins->inst_p1 = arg_array [0];
9155 MONO_ADD_INS (bblock, ins);
9156 link_bblock (cfg, bblock, end_bblock);
9157 start_new_bblock = 1;
9159 // FIXME: Eliminate unreachable epilogs
9162 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9163 * only reachable from this call.
9165 GET_BBLOCK (cfg, tblock, ip + 5);
9166 if (tblock == bblock || tblock->in_count == 0)
9175 * Synchronized wrappers.
9176 * Its hard to determine where to replace a method with its synchronized
9177 * wrapper without causing an infinite recursion. The current solution is
9178 * to add the synchronized wrapper in the trampolines, and to
9179 * change the called method to a dummy wrapper, and resolve that wrapper
9180 * to the real method in mono_jit_compile_method ().
9182 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9183 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9184 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9185 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9189 INLINE_FAILURE ("call");
9190 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9191 imt_arg, vtable_arg);
9194 link_bblock (cfg, bblock, end_bblock);
9195 start_new_bblock = 1;
9197 // FIXME: Eliminate unreachable epilogs
9200 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9201 * only reachable from this call.
9203 GET_BBLOCK (cfg, tblock, ip + 5);
9204 if (tblock == bblock || tblock->in_count == 0)
9211 /* End of call, INS should contain the result of the call, if any */
9213 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9216 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9221 if (keep_this_alive) {
9222 MonoInst *dummy_use;
9224 /* See mono_emit_method_call_full () */
9225 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9228 CHECK_CFG_EXCEPTION;
9232 g_assert (*ip == CEE_RET);
9236 constrained_call = NULL;
9238 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9242 if (cfg->method != method) {
9243 /* return from inlined method */
9245 * If in_count == 0, that means the ret is unreachable due to
9246 * being preceeded by a throw. In that case, inline_method () will
9247 * handle setting the return value
9248 * (test case: test_0_inline_throw ()).
9250 if (return_var && cfg->cbb->in_count) {
9251 MonoType *ret_type = mono_method_signature (method)->ret;
9257 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9260 //g_assert (returnvar != -1);
9261 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9262 cfg->ret_var_set = TRUE;
9265 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9267 if (cfg->lmf_var && cfg->cbb->in_count)
9271 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
9273 if (seq_points && !sym_seq_points) {
9275 * Place a seq point here too even through the IL stack is not
9276 * empty, so a step over on
9279 * will work correctly.
9281 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9282 MONO_ADD_INS (cfg->cbb, ins);
9285 g_assert (!return_var);
9289 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9292 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9295 if (!cfg->vret_addr) {
9298 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9300 EMIT_NEW_RETLOADA (cfg, ret_addr);
9302 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9303 ins->klass = mono_class_from_mono_type (ret_type);
9306 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9307 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9308 MonoInst *iargs [1];
9312 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9313 mono_arch_emit_setret (cfg, method, conv);
9315 mono_arch_emit_setret (cfg, method, *sp);
9318 mono_arch_emit_setret (cfg, method, *sp);
9323 if (sp != stack_start)
9325 MONO_INST_NEW (cfg, ins, OP_BR);
9327 ins->inst_target_bb = end_bblock;
9328 MONO_ADD_INS (bblock, ins);
9329 link_bblock (cfg, bblock, end_bblock);
9330 start_new_bblock = 1;
9334 MONO_INST_NEW (cfg, ins, OP_BR);
9336 target = ip + 1 + (signed char)(*ip);
9338 GET_BBLOCK (cfg, tblock, target);
9339 link_bblock (cfg, bblock, tblock);
9340 ins->inst_target_bb = tblock;
9341 if (sp != stack_start) {
9342 handle_stack_args (cfg, stack_start, sp - stack_start);
9344 CHECK_UNVERIFIABLE (cfg);
9346 MONO_ADD_INS (bblock, ins);
9347 start_new_bblock = 1;
9348 inline_costs += BRANCH_COST;
9362 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9364 target = ip + 1 + *(signed char*)ip;
9370 inline_costs += BRANCH_COST;
9374 MONO_INST_NEW (cfg, ins, OP_BR);
9377 target = ip + 4 + (gint32)read32(ip);
9379 GET_BBLOCK (cfg, tblock, target);
9380 link_bblock (cfg, bblock, tblock);
9381 ins->inst_target_bb = tblock;
9382 if (sp != stack_start) {
9383 handle_stack_args (cfg, stack_start, sp - stack_start);
9385 CHECK_UNVERIFIABLE (cfg);
9388 MONO_ADD_INS (bblock, ins);
9390 start_new_bblock = 1;
9391 inline_costs += BRANCH_COST;
9398 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9399 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9400 guint32 opsize = is_short ? 1 : 4;
9402 CHECK_OPSIZE (opsize);
9404 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9407 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9412 GET_BBLOCK (cfg, tblock, target);
9413 link_bblock (cfg, bblock, tblock);
9414 GET_BBLOCK (cfg, tblock, ip);
9415 link_bblock (cfg, bblock, tblock);
9417 if (sp != stack_start) {
9418 handle_stack_args (cfg, stack_start, sp - stack_start);
9419 CHECK_UNVERIFIABLE (cfg);
9422 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9423 cmp->sreg1 = sp [0]->dreg;
9424 type_from_op (cmp, sp [0], NULL);
9427 #if SIZEOF_REGISTER == 4
9428 if (cmp->opcode == OP_LCOMPARE_IMM) {
9429 /* Convert it to OP_LCOMPARE */
9430 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9431 ins->type = STACK_I8;
9432 ins->dreg = alloc_dreg (cfg, STACK_I8);
9434 MONO_ADD_INS (bblock, ins);
9435 cmp->opcode = OP_LCOMPARE;
9436 cmp->sreg2 = ins->dreg;
9439 MONO_ADD_INS (bblock, cmp);
9441 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9442 type_from_op (ins, sp [0], NULL);
9443 MONO_ADD_INS (bblock, ins);
9444 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9445 GET_BBLOCK (cfg, tblock, target);
9446 ins->inst_true_bb = tblock;
9447 GET_BBLOCK (cfg, tblock, ip);
9448 ins->inst_false_bb = tblock;
9449 start_new_bblock = 2;
9452 inline_costs += BRANCH_COST;
9467 MONO_INST_NEW (cfg, ins, *ip);
9469 target = ip + 4 + (gint32)read32(ip);
9475 inline_costs += BRANCH_COST;
9479 MonoBasicBlock **targets;
9480 MonoBasicBlock *default_bblock;
9481 MonoJumpInfoBBTable *table;
9482 int offset_reg = alloc_preg (cfg);
9483 int target_reg = alloc_preg (cfg);
9484 int table_reg = alloc_preg (cfg);
9485 int sum_reg = alloc_preg (cfg);
9486 gboolean use_op_switch;
9490 n = read32 (ip + 1);
9493 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9497 CHECK_OPSIZE (n * sizeof (guint32));
9498 target = ip + n * sizeof (guint32);
9500 GET_BBLOCK (cfg, default_bblock, target);
9501 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9503 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9504 for (i = 0; i < n; ++i) {
9505 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9506 targets [i] = tblock;
9507 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9511 if (sp != stack_start) {
9513 * Link the current bb with the targets as well, so handle_stack_args
9514 * will set their in_stack correctly.
9516 link_bblock (cfg, bblock, default_bblock);
9517 for (i = 0; i < n; ++i)
9518 link_bblock (cfg, bblock, targets [i]);
9520 handle_stack_args (cfg, stack_start, sp - stack_start);
9522 CHECK_UNVERIFIABLE (cfg);
9525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9526 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9529 for (i = 0; i < n; ++i)
9530 link_bblock (cfg, bblock, targets [i]);
9532 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9533 table->table = targets;
9534 table->table_size = n;
9536 use_op_switch = FALSE;
9538 /* ARM implements SWITCH statements differently */
9539 /* FIXME: Make it use the generic implementation */
9540 if (!cfg->compile_aot)
9541 use_op_switch = TRUE;
9544 if (COMPILE_LLVM (cfg))
9545 use_op_switch = TRUE;
9547 cfg->cbb->has_jump_table = 1;
9549 if (use_op_switch) {
9550 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9551 ins->sreg1 = src1->dreg;
9552 ins->inst_p0 = table;
9553 ins->inst_many_bb = targets;
9554 ins->klass = GUINT_TO_POINTER (n);
9555 MONO_ADD_INS (cfg->cbb, ins);
9557 if (sizeof (gpointer) == 8)
9558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9562 #if SIZEOF_REGISTER == 8
9563 /* The upper word might not be zero, and we add it to a 64 bit address later */
9564 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9567 if (cfg->compile_aot) {
9568 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9570 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9571 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9572 ins->inst_p0 = table;
9573 ins->dreg = table_reg;
9574 MONO_ADD_INS (cfg->cbb, ins);
9577 /* FIXME: Use load_memindex */
9578 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9580 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9582 start_new_bblock = 1;
9583 inline_costs += (BRANCH_COST * 2);
9603 dreg = alloc_freg (cfg);
9606 dreg = alloc_lreg (cfg);
9609 dreg = alloc_ireg_ref (cfg);
9612 dreg = alloc_preg (cfg);
9615 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9616 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9617 ins->flags |= ins_flag;
9618 MONO_ADD_INS (bblock, ins);
9620 if (ins_flag & MONO_INST_VOLATILE) {
9621 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9622 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9638 if (ins_flag & MONO_INST_VOLATILE) {
9639 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9640 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9643 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9644 ins->flags |= ins_flag;
9647 MONO_ADD_INS (bblock, ins);
9649 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9650 emit_write_barrier (cfg, sp [0], sp [1]);
9659 MONO_INST_NEW (cfg, ins, (*ip));
9661 ins->sreg1 = sp [0]->dreg;
9662 ins->sreg2 = sp [1]->dreg;
9663 type_from_op (ins, sp [0], sp [1]);
9665 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9667 /* Use the immediate opcodes if possible */
9668 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9669 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9670 if (imm_opcode != -1) {
9671 ins->opcode = imm_opcode;
9672 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9675 NULLIFY_INS (sp [1]);
9679 MONO_ADD_INS ((cfg)->cbb, (ins));
9681 *sp++ = mono_decompose_opcode (cfg, ins);
9698 MONO_INST_NEW (cfg, ins, (*ip));
9700 ins->sreg1 = sp [0]->dreg;
9701 ins->sreg2 = sp [1]->dreg;
9702 type_from_op (ins, sp [0], sp [1]);
9704 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9705 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9707 /* FIXME: Pass opcode to is_inst_imm */
9709 /* Use the immediate opcodes if possible */
9710 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9713 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9714 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9715 /* Keep emulated opcodes which are optimized away later */
9716 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9717 imm_opcode = mono_op_to_op_imm (ins->opcode);
9720 if (imm_opcode != -1) {
9721 ins->opcode = imm_opcode;
9722 if (sp [1]->opcode == OP_I8CONST) {
9723 #if SIZEOF_REGISTER == 8
9724 ins->inst_imm = sp [1]->inst_l;
9726 ins->inst_ls_word = sp [1]->inst_ls_word;
9727 ins->inst_ms_word = sp [1]->inst_ms_word;
9731 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9734 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9735 if (sp [1]->next == NULL)
9736 NULLIFY_INS (sp [1]);
9739 MONO_ADD_INS ((cfg)->cbb, (ins));
9741 *sp++ = mono_decompose_opcode (cfg, ins);
9754 case CEE_CONV_OVF_I8:
9755 case CEE_CONV_OVF_U8:
9759 /* Special case this earlier so we have long constants in the IR */
9760 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9761 int data = sp [-1]->inst_c0;
9762 sp [-1]->opcode = OP_I8CONST;
9763 sp [-1]->type = STACK_I8;
9764 #if SIZEOF_REGISTER == 8
9765 if ((*ip) == CEE_CONV_U8)
9766 sp [-1]->inst_c0 = (guint32)data;
9768 sp [-1]->inst_c0 = data;
9770 sp [-1]->inst_ls_word = data;
9771 if ((*ip) == CEE_CONV_U8)
9772 sp [-1]->inst_ms_word = 0;
9774 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9776 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9783 case CEE_CONV_OVF_I4:
9784 case CEE_CONV_OVF_I1:
9785 case CEE_CONV_OVF_I2:
9786 case CEE_CONV_OVF_I:
9787 case CEE_CONV_OVF_U:
9790 if (sp [-1]->type == STACK_R8) {
9791 ADD_UNOP (CEE_CONV_OVF_I8);
9798 case CEE_CONV_OVF_U1:
9799 case CEE_CONV_OVF_U2:
9800 case CEE_CONV_OVF_U4:
9803 if (sp [-1]->type == STACK_R8) {
9804 ADD_UNOP (CEE_CONV_OVF_U8);
9811 case CEE_CONV_OVF_I1_UN:
9812 case CEE_CONV_OVF_I2_UN:
9813 case CEE_CONV_OVF_I4_UN:
9814 case CEE_CONV_OVF_I8_UN:
9815 case CEE_CONV_OVF_U1_UN:
9816 case CEE_CONV_OVF_U2_UN:
9817 case CEE_CONV_OVF_U4_UN:
9818 case CEE_CONV_OVF_U8_UN:
9819 case CEE_CONV_OVF_I_UN:
9820 case CEE_CONV_OVF_U_UN:
9827 CHECK_CFG_EXCEPTION;
9831 case CEE_ADD_OVF_UN:
9833 case CEE_MUL_OVF_UN:
9835 case CEE_SUB_OVF_UN:
9841 GSHAREDVT_FAILURE (*ip);
9844 token = read32 (ip + 1);
9845 klass = mini_get_class (method, token, generic_context);
9846 CHECK_TYPELOAD (klass);
9848 if (generic_class_is_reference_type (cfg, klass)) {
9849 MonoInst *store, *load;
9850 int dreg = alloc_ireg_ref (cfg);
9852 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9853 load->flags |= ins_flag;
9854 MONO_ADD_INS (cfg->cbb, load);
9856 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9857 store->flags |= ins_flag;
9858 MONO_ADD_INS (cfg->cbb, store);
9860 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9861 emit_write_barrier (cfg, sp [0], sp [1]);
9863 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9875 token = read32 (ip + 1);
9876 klass = mini_get_class (method, token, generic_context);
9877 CHECK_TYPELOAD (klass);
9879 /* Optimize the common ldobj+stloc combination */
9889 loc_index = ip [5] - CEE_STLOC_0;
9896 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9897 CHECK_LOCAL (loc_index);
9899 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9900 ins->dreg = cfg->locals [loc_index]->dreg;
9901 ins->flags |= ins_flag;
9904 if (ins_flag & MONO_INST_VOLATILE) {
9905 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9906 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9912 /* Optimize the ldobj+stobj combination */
9913 /* The reference case ends up being a load+store anyway */
9914 /* Skip this if the operation is volatile. */
9915 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9920 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9927 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9928 ins->flags |= ins_flag;
9931 if (ins_flag & MONO_INST_VOLATILE) {
9932 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9933 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9942 CHECK_STACK_OVF (1);
9944 n = read32 (ip + 1);
9946 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9947 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9948 ins->type = STACK_OBJ;
9951 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9952 MonoInst *iargs [1];
9953 char *str = mono_method_get_wrapper_data (method, n);
9955 if (cfg->compile_aot)
9956 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9958 EMIT_NEW_PCONST (cfg, iargs [0], str);
9959 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9961 if (cfg->opt & MONO_OPT_SHARED) {
9962 MonoInst *iargs [3];
9964 if (cfg->compile_aot) {
9965 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9967 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9968 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9969 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9970 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9971 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9973 if (bblock->out_of_line) {
9974 MonoInst *iargs [2];
9976 if (image == mono_defaults.corlib) {
9978 * Avoid relocations in AOT and save some space by using a
9979 * version of helper_ldstr specialized to mscorlib.
9981 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9982 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9984 /* Avoid creating the string object */
9985 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9986 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9987 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9991 if (cfg->compile_aot) {
9992 NEW_LDSTRCONST (cfg, ins, image, n);
9994 MONO_ADD_INS (bblock, ins);
9997 NEW_PCONST (cfg, ins, NULL);
9998 ins->type = STACK_OBJ;
9999 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10001 OUT_OF_MEMORY_FAILURE;
10004 MONO_ADD_INS (bblock, ins);
10013 MonoInst *iargs [2];
10014 MonoMethodSignature *fsig;
10017 MonoInst *vtable_arg = NULL;
10020 token = read32 (ip + 1);
10021 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10022 if (!cmethod || mono_loader_get_last_error ())
10024 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10027 mono_save_token_info (cfg, image, token, cmethod);
10029 if (!mono_class_init (cmethod->klass))
10030 TYPE_LOAD_ERROR (cmethod->klass);
10032 context_used = mini_method_check_context_used (cfg, cmethod);
10034 if (mono_security_cas_enabled ()) {
10035 if (check_linkdemand (cfg, method, cmethod))
10036 INLINE_FAILURE ("linkdemand");
10037 CHECK_CFG_EXCEPTION;
10038 } else if (mono_security_core_clr_enabled ()) {
10039 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10042 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10043 emit_generic_class_init (cfg, cmethod->klass);
10044 CHECK_TYPELOAD (cmethod->klass);
10048 if (cfg->gsharedvt) {
10049 if (mini_is_gsharedvt_variable_signature (sig))
10050 GSHAREDVT_FAILURE (*ip);
10054 n = fsig->param_count;
10058 * Generate smaller code for the common newobj <exception> instruction in
10059 * argument checking code.
10061 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10062 is_exception_class (cmethod->klass) && n <= 2 &&
10063 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10064 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10065 MonoInst *iargs [3];
10069 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10072 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10075 iargs [1] = sp [0];
10076 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10079 iargs [1] = sp [0];
10080 iargs [2] = sp [1];
10081 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10084 g_assert_not_reached ();
10092 /* move the args to allow room for 'this' in the first position */
10098 /* check_call_signature () requires sp[0] to be set */
10099 this_ins.type = STACK_OBJ;
10100 sp [0] = &this_ins;
10101 if (check_call_signature (cfg, fsig, sp))
10106 if (mini_class_is_system_array (cmethod->klass)) {
10107 *sp = emit_get_rgctx_method (cfg, context_used,
10108 cmethod, MONO_RGCTX_INFO_METHOD);
10110 /* Avoid varargs in the common case */
10111 if (fsig->param_count == 1)
10112 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10113 else if (fsig->param_count == 2)
10114 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10115 else if (fsig->param_count == 3)
10116 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10117 else if (fsig->param_count == 4)
10118 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10120 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10121 } else if (cmethod->string_ctor) {
10122 g_assert (!context_used);
10123 g_assert (!vtable_arg);
10124 /* we simply pass a null pointer */
10125 EMIT_NEW_PCONST (cfg, *sp, NULL);
10126 /* now call the string ctor */
10127 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10129 if (cmethod->klass->valuetype) {
10130 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10131 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10132 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10137 * The code generated by mini_emit_virtual_call () expects
10138 * iargs [0] to be a boxed instance, but luckily the vcall
10139 * will be transformed into a normal call there.
10141 } else if (context_used) {
10142 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10145 MonoVTable *vtable = NULL;
10147 if (!cfg->compile_aot)
10148 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10149 CHECK_TYPELOAD (cmethod->klass);
10152 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10153 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10154 * As a workaround, we call class cctors before allocating objects.
10156 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10157 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10158 if (cfg->verbose_level > 2)
10159 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10160 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10163 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10166 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10169 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10171 /* Now call the actual ctor */
10172 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10173 CHECK_CFG_EXCEPTION;
10176 if (alloc == NULL) {
10178 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10179 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10187 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10188 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10191 case CEE_CASTCLASS:
10195 token = read32 (ip + 1);
10196 klass = mini_get_class (method, token, generic_context);
10197 CHECK_TYPELOAD (klass);
10198 if (sp [0]->type != STACK_OBJ)
10201 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10202 CHECK_CFG_EXCEPTION;
10211 token = read32 (ip + 1);
10212 klass = mini_get_class (method, token, generic_context);
10213 CHECK_TYPELOAD (klass);
10214 if (sp [0]->type != STACK_OBJ)
10217 context_used = mini_class_check_context_used (cfg, klass);
10219 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10220 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10221 MonoInst *args [3];
10227 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10230 if (cfg->compile_aot)
10231 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10233 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10235 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10238 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10239 MonoMethod *mono_isinst;
10240 MonoInst *iargs [1];
10243 mono_isinst = mono_marshal_get_isinst (klass);
10244 iargs [0] = sp [0];
10246 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10247 iargs, ip, cfg->real_offset, TRUE, &bblock);
10248 CHECK_CFG_EXCEPTION;
10249 g_assert (costs > 0);
10252 cfg->real_offset += 5;
10256 inline_costs += costs;
10259 ins = handle_isinst (cfg, klass, *sp, context_used);
10260 CHECK_CFG_EXCEPTION;
10267 case CEE_UNBOX_ANY: {
10268 MonoInst *res, *addr;
10273 token = read32 (ip + 1);
10274 klass = mini_get_class (method, token, generic_context);
10275 CHECK_TYPELOAD (klass);
10277 mono_save_token_info (cfg, image, token, klass);
10279 context_used = mini_class_check_context_used (cfg, klass);
10281 if (mini_is_gsharedvt_klass (cfg, klass)) {
10282 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10284 } else if (generic_class_is_reference_type (cfg, klass)) {
10285 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10286 CHECK_CFG_EXCEPTION;
10287 } else if (mono_class_is_nullable (klass)) {
10288 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10290 addr = handle_unbox (cfg, klass, sp, context_used);
10292 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10303 MonoClass *enum_class;
10304 MonoMethod *has_flag;
10310 token = read32 (ip + 1);
10311 klass = mini_get_class (method, token, generic_context);
10312 CHECK_TYPELOAD (klass);
10314 mono_save_token_info (cfg, image, token, klass);
10316 context_used = mini_class_check_context_used (cfg, klass);
10318 if (generic_class_is_reference_type (cfg, klass)) {
10324 if (klass == mono_defaults.void_class)
10326 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10328 /* frequent check in generic code: box (struct), brtrue */
10333 * <push int/long ptr>
10336 * constrained. MyFlags
10337 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10339 * If we find this sequence and the operand types on box and constrained
10340 * are equal, we can emit a specialized instruction sequence instead of
10341 * the very slow HasFlag () call.
10343 if ((cfg->opt & MONO_OPT_INTRINS) &&
10344 /* Cheap checks first. */
10345 ip + 5 + 6 + 5 < end &&
10346 ip [5] == CEE_PREFIX1 &&
10347 ip [6] == CEE_CONSTRAINED_ &&
10348 ip [11] == CEE_CALLVIRT &&
10349 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10350 mono_class_is_enum (klass) &&
10351 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10352 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10353 has_flag->klass == mono_defaults.enum_class &&
10354 !strcmp (has_flag->name, "HasFlag") &&
10355 has_flag->signature->hasthis &&
10356 has_flag->signature->param_count == 1) {
10357 CHECK_TYPELOAD (enum_class);
10359 if (enum_class == klass) {
10360 MonoInst *enum_this, *enum_flag;
10365 enum_this = sp [0];
10366 enum_flag = sp [1];
10368 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10373 // FIXME: LLVM can't handle the inconsistent bb linking
10374 if (!mono_class_is_nullable (klass) &&
10375 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10376 (ip [5] == CEE_BRTRUE ||
10377 ip [5] == CEE_BRTRUE_S ||
10378 ip [5] == CEE_BRFALSE ||
10379 ip [5] == CEE_BRFALSE_S)) {
10380 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10382 MonoBasicBlock *true_bb, *false_bb;
10386 if (cfg->verbose_level > 3) {
10387 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10388 printf ("<box+brtrue opt>\n");
10393 case CEE_BRFALSE_S:
10396 target = ip + 1 + (signed char)(*ip);
10403 target = ip + 4 + (gint)(read32 (ip));
10407 g_assert_not_reached ();
10411 * We need to link both bblocks, since it is needed for handling stack
10412 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10413 * Branching to only one of them would lead to inconsistencies, so
10414 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10416 GET_BBLOCK (cfg, true_bb, target);
10417 GET_BBLOCK (cfg, false_bb, ip);
10419 mono_link_bblock (cfg, cfg->cbb, true_bb);
10420 mono_link_bblock (cfg, cfg->cbb, false_bb);
10422 if (sp != stack_start) {
10423 handle_stack_args (cfg, stack_start, sp - stack_start);
10425 CHECK_UNVERIFIABLE (cfg);
10428 if (COMPILE_LLVM (cfg)) {
10429 dreg = alloc_ireg (cfg);
10430 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10431 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10433 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10435 /* The JIT can't eliminate the iconst+compare */
10436 MONO_INST_NEW (cfg, ins, OP_BR);
10437 ins->inst_target_bb = is_true ? true_bb : false_bb;
10438 MONO_ADD_INS (cfg->cbb, ins);
10441 start_new_bblock = 1;
10445 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10447 CHECK_CFG_EXCEPTION;
10456 token = read32 (ip + 1);
10457 klass = mini_get_class (method, token, generic_context);
10458 CHECK_TYPELOAD (klass);
10460 mono_save_token_info (cfg, image, token, klass);
10462 context_used = mini_class_check_context_used (cfg, klass);
10464 if (mono_class_is_nullable (klass)) {
10467 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10468 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10472 ins = handle_unbox (cfg, klass, sp, context_used);
10485 MonoClassField *field;
10486 #ifndef DISABLE_REMOTING
10490 gboolean is_instance;
10492 gpointer addr = NULL;
10493 gboolean is_special_static;
10495 MonoInst *store_val = NULL;
10496 MonoInst *thread_ins;
10499 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10501 if (op == CEE_STFLD) {
10504 store_val = sp [1];
10509 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10511 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10514 if (op == CEE_STSFLD) {
10517 store_val = sp [0];
10522 token = read32 (ip + 1);
10523 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10524 field = mono_method_get_wrapper_data (method, token);
10525 klass = field->parent;
10528 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10531 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10532 FIELD_ACCESS_FAILURE (method, field);
10533 mono_class_init (klass);
10535 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10538 /* if the class is Critical then transparent code cannot access its fields */
10539 if (!is_instance && mono_security_core_clr_enabled ())
10540 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10542 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10543 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10544 if (mono_security_core_clr_enabled ())
10545 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10549 * LDFLD etc. is usable on static fields as well, so convert those cases to
10552 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10564 g_assert_not_reached ();
10566 is_instance = FALSE;
10569 context_used = mini_class_check_context_used (cfg, klass);
10571 /* INSTANCE CASE */
10573 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10574 if (op == CEE_STFLD) {
10575 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10577 #ifndef DISABLE_REMOTING
10578 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10579 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10580 MonoInst *iargs [5];
10582 GSHAREDVT_FAILURE (op);
10584 iargs [0] = sp [0];
10585 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10586 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10587 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10589 iargs [4] = sp [1];
10591 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10592 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10593 iargs, ip, cfg->real_offset, TRUE, &bblock);
10594 CHECK_CFG_EXCEPTION;
10595 g_assert (costs > 0);
10597 cfg->real_offset += 5;
10599 inline_costs += costs;
10601 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10608 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10610 if (mini_is_gsharedvt_klass (cfg, klass)) {
10611 MonoInst *offset_ins;
10613 context_used = mini_class_check_context_used (cfg, klass);
10615 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10616 dreg = alloc_ireg_mp (cfg);
10617 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10618 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10619 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10621 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10623 if (sp [0]->opcode != OP_LDADDR)
10624 store->flags |= MONO_INST_FAULT;
10626 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10627 /* insert call to write barrier */
10631 dreg = alloc_ireg_mp (cfg);
10632 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10633 emit_write_barrier (cfg, ptr, sp [1]);
10636 store->flags |= ins_flag;
10643 #ifndef DISABLE_REMOTING
10644 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10645 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10646 MonoInst *iargs [4];
10648 GSHAREDVT_FAILURE (op);
10650 iargs [0] = sp [0];
10651 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10652 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10653 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10654 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10655 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10656 iargs, ip, cfg->real_offset, TRUE, &bblock);
10657 CHECK_CFG_EXCEPTION;
10658 g_assert (costs > 0);
10660 cfg->real_offset += 5;
10664 inline_costs += costs;
10666 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10672 if (sp [0]->type == STACK_VTYPE) {
10675 /* Have to compute the address of the variable */
10677 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10679 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10681 g_assert (var->klass == klass);
10683 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10687 if (op == CEE_LDFLDA) {
10688 if (is_magic_tls_access (field)) {
10689 GSHAREDVT_FAILURE (*ip);
10691 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10693 if (sp [0]->type == STACK_OBJ) {
10694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10695 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10698 dreg = alloc_ireg_mp (cfg);
10700 if (mini_is_gsharedvt_klass (cfg, klass)) {
10701 MonoInst *offset_ins;
10703 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10704 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10706 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10708 ins->klass = mono_class_from_mono_type (field->type);
10709 ins->type = STACK_MP;
10715 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10717 if (mini_is_gsharedvt_klass (cfg, klass)) {
10718 MonoInst *offset_ins;
10720 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10721 dreg = alloc_ireg_mp (cfg);
10722 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10723 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10725 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10727 load->flags |= ins_flag;
10728 if (sp [0]->opcode != OP_LDADDR)
10729 load->flags |= MONO_INST_FAULT;
10743 * We can only support shared generic static
10744 * field access on architectures where the
10745 * trampoline code has been extended to handle
10746 * the generic class init.
10748 #ifndef MONO_ARCH_VTABLE_REG
10749 GENERIC_SHARING_FAILURE (op);
10752 context_used = mini_class_check_context_used (cfg, klass);
10754 ftype = mono_field_get_type (field);
10756 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10759 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10760 * to be called here.
10762 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10763 mono_class_vtable (cfg->domain, klass);
10764 CHECK_TYPELOAD (klass);
10766 mono_domain_lock (cfg->domain);
10767 if (cfg->domain->special_static_fields)
10768 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10769 mono_domain_unlock (cfg->domain);
10771 is_special_static = mono_class_field_is_special_static (field);
10773 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10774 thread_ins = mono_get_thread_intrinsic (cfg);
10778 /* Generate IR to compute the field address */
10779 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10781 * Fast access to TLS data
10782 * Inline version of get_thread_static_data () in
10786 int idx, static_data_reg, array_reg, dreg;
10788 GSHAREDVT_FAILURE (op);
10790 // offset &= 0x7fffffff;
10791 // idx = (offset >> 24) - 1;
10792 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10793 MONO_ADD_INS (cfg->cbb, thread_ins);
10794 static_data_reg = alloc_ireg (cfg);
10795 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10797 if (cfg->compile_aot) {
10798 int offset_reg, offset2_reg, idx_reg;
10800 /* For TLS variables, this will return the TLS offset */
10801 EMIT_NEW_SFLDACONST (cfg, ins, field);
10802 offset_reg = ins->dreg;
10803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10804 idx_reg = alloc_ireg (cfg);
10805 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10808 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10809 array_reg = alloc_ireg (cfg);
10810 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10811 offset2_reg = alloc_ireg (cfg);
10812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10813 dreg = alloc_ireg (cfg);
10814 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10816 offset = (gsize)addr & 0x7fffffff;
10817 idx = (offset >> 24) - 1;
10819 array_reg = alloc_ireg (cfg);
10820 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10821 dreg = alloc_ireg (cfg);
10822 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10824 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10825 (cfg->compile_aot && is_special_static) ||
10826 (context_used && is_special_static)) {
10827 MonoInst *iargs [2];
10829 g_assert (field->parent);
10830 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10831 if (context_used) {
10832 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10833 field, MONO_RGCTX_INFO_CLASS_FIELD);
10835 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10837 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10838 } else if (context_used) {
10839 MonoInst *static_data;
10842 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10843 method->klass->name_space, method->klass->name, method->name,
10844 depth, field->offset);
10847 if (mono_class_needs_cctor_run (klass, method))
10848 emit_generic_class_init (cfg, klass);
10851 * The pointer we're computing here is
10853 * super_info.static_data + field->offset
10855 static_data = emit_get_rgctx_klass (cfg, context_used,
10856 klass, MONO_RGCTX_INFO_STATIC_DATA);
10858 if (mini_is_gsharedvt_klass (cfg, klass)) {
10859 MonoInst *offset_ins;
10861 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10862 dreg = alloc_ireg_mp (cfg);
10863 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10864 } else if (field->offset == 0) {
10867 int addr_reg = mono_alloc_preg (cfg);
10868 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10870 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10871 MonoInst *iargs [2];
10873 g_assert (field->parent);
10874 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10875 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10876 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10878 MonoVTable *vtable = NULL;
10880 if (!cfg->compile_aot)
10881 vtable = mono_class_vtable (cfg->domain, klass);
10882 CHECK_TYPELOAD (klass);
10885 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10886 if (!(g_slist_find (class_inits, klass))) {
10887 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10888 if (cfg->verbose_level > 2)
10889 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10890 class_inits = g_slist_prepend (class_inits, klass);
10893 if (cfg->run_cctors) {
10895 /* This makes it so that inlining cannot trigger */
10896 /* .cctors: too many apps depend on them */
10897 /* running with a specific order... */
10899 if (! vtable->initialized)
10900 INLINE_FAILURE ("class init");
10901 ex = mono_runtime_class_init_full (vtable, FALSE);
10903 set_exception_object (cfg, ex);
10904 goto exception_exit;
10908 if (cfg->compile_aot)
10909 EMIT_NEW_SFLDACONST (cfg, ins, field);
10912 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10914 EMIT_NEW_PCONST (cfg, ins, addr);
10917 MonoInst *iargs [1];
10918 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10919 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10923 /* Generate IR to do the actual load/store operation */
10925 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10926 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10927 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10930 if (op == CEE_LDSFLDA) {
10931 ins->klass = mono_class_from_mono_type (ftype);
10932 ins->type = STACK_PTR;
10934 } else if (op == CEE_STSFLD) {
10937 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10938 store->flags |= ins_flag;
10940 gboolean is_const = FALSE;
10941 MonoVTable *vtable = NULL;
10942 gpointer addr = NULL;
10944 if (!context_used) {
10945 vtable = mono_class_vtable (cfg->domain, klass);
10946 CHECK_TYPELOAD (klass);
10948 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10949 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10950 int ro_type = ftype->type;
10952 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10953 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10954 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10957 GSHAREDVT_FAILURE (op);
10959 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10962 case MONO_TYPE_BOOLEAN:
10964 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10968 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10971 case MONO_TYPE_CHAR:
10973 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10977 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10982 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10986 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10991 case MONO_TYPE_PTR:
10992 case MONO_TYPE_FNPTR:
10993 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10994 type_to_eval_stack_type ((cfg), field->type, *sp);
10997 case MONO_TYPE_STRING:
10998 case MONO_TYPE_OBJECT:
10999 case MONO_TYPE_CLASS:
11000 case MONO_TYPE_SZARRAY:
11001 case MONO_TYPE_ARRAY:
11002 if (!mono_gc_is_moving ()) {
11003 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11004 type_to_eval_stack_type ((cfg), field->type, *sp);
11012 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11017 case MONO_TYPE_VALUETYPE:
11027 CHECK_STACK_OVF (1);
11029 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11030 load->flags |= ins_flag;
11036 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11037 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11038 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11049 token = read32 (ip + 1);
11050 klass = mini_get_class (method, token, generic_context);
11051 CHECK_TYPELOAD (klass);
11052 if (ins_flag & MONO_INST_VOLATILE) {
11053 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11054 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11056 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11057 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11058 ins->flags |= ins_flag;
11059 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11060 generic_class_is_reference_type (cfg, klass)) {
11061 /* insert call to write barrier */
11062 emit_write_barrier (cfg, sp [0], sp [1]);
11074 const char *data_ptr;
11076 guint32 field_token;
11082 token = read32 (ip + 1);
11084 klass = mini_get_class (method, token, generic_context);
11085 CHECK_TYPELOAD (klass);
11087 context_used = mini_class_check_context_used (cfg, klass);
11089 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11090 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11091 ins->sreg1 = sp [0]->dreg;
11092 ins->type = STACK_I4;
11093 ins->dreg = alloc_ireg (cfg);
11094 MONO_ADD_INS (cfg->cbb, ins);
11095 *sp = mono_decompose_opcode (cfg, ins);
11098 if (context_used) {
11099 MonoInst *args [3];
11100 MonoClass *array_class = mono_array_class_get (klass, 1);
11101 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11103 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11106 args [0] = emit_get_rgctx_klass (cfg, context_used,
11107 array_class, MONO_RGCTX_INFO_VTABLE);
11112 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11114 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11116 if (cfg->opt & MONO_OPT_SHARED) {
11117 /* Decompose now to avoid problems with references to the domainvar */
11118 MonoInst *iargs [3];
11120 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11121 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11122 iargs [2] = sp [0];
11124 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11126 /* Decompose later since it is needed by abcrem */
11127 MonoClass *array_type = mono_array_class_get (klass, 1);
11128 mono_class_vtable (cfg->domain, array_type);
11129 CHECK_TYPELOAD (array_type);
11131 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11132 ins->dreg = alloc_ireg_ref (cfg);
11133 ins->sreg1 = sp [0]->dreg;
11134 ins->inst_newa_class = klass;
11135 ins->type = STACK_OBJ;
11136 ins->klass = array_type;
11137 MONO_ADD_INS (cfg->cbb, ins);
11138 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11139 cfg->cbb->has_array_access = TRUE;
11141 /* Needed so mono_emit_load_get_addr () gets called */
11142 mono_get_got_var (cfg);
11152 * we inline/optimize the initialization sequence if possible.
11153 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11154 * for small sizes open code the memcpy
11155 * ensure the rva field is big enough
11157 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11158 MonoMethod *memcpy_method = get_memcpy_method ();
11159 MonoInst *iargs [3];
11160 int add_reg = alloc_ireg_mp (cfg);
11162 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11163 if (cfg->compile_aot) {
11164 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11166 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11168 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11169 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11178 if (sp [0]->type != STACK_OBJ)
11181 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11182 ins->dreg = alloc_preg (cfg);
11183 ins->sreg1 = sp [0]->dreg;
11184 ins->type = STACK_I4;
11185 /* This flag will be inherited by the decomposition */
11186 ins->flags |= MONO_INST_FAULT;
11187 MONO_ADD_INS (cfg->cbb, ins);
11188 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11189 cfg->cbb->has_array_access = TRUE;
11197 if (sp [0]->type != STACK_OBJ)
11200 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11202 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11203 CHECK_TYPELOAD (klass);
11204 /* we need to make sure that this array is exactly the type it needs
11205 * to be for correctness. the wrappers are lax with their usage
11206 * so we need to ignore them here
11208 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11209 MonoClass *array_class = mono_array_class_get (klass, 1);
11210 mini_emit_check_array_type (cfg, sp [0], array_class);
11211 CHECK_TYPELOAD (array_class);
11215 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11220 case CEE_LDELEM_I1:
11221 case CEE_LDELEM_U1:
11222 case CEE_LDELEM_I2:
11223 case CEE_LDELEM_U2:
11224 case CEE_LDELEM_I4:
11225 case CEE_LDELEM_U4:
11226 case CEE_LDELEM_I8:
11228 case CEE_LDELEM_R4:
11229 case CEE_LDELEM_R8:
11230 case CEE_LDELEM_REF: {
11236 if (*ip == CEE_LDELEM) {
11238 token = read32 (ip + 1);
11239 klass = mini_get_class (method, token, generic_context);
11240 CHECK_TYPELOAD (klass);
11241 mono_class_init (klass);
11244 klass = array_access_to_klass (*ip);
11246 if (sp [0]->type != STACK_OBJ)
11249 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11251 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11252 // FIXME-VT: OP_ICONST optimization
11253 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11254 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11255 ins->opcode = OP_LOADV_MEMBASE;
11256 } else if (sp [1]->opcode == OP_ICONST) {
11257 int array_reg = sp [0]->dreg;
11258 int index_reg = sp [1]->dreg;
11259 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11261 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11262 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11264 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11265 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11268 if (*ip == CEE_LDELEM)
11275 case CEE_STELEM_I1:
11276 case CEE_STELEM_I2:
11277 case CEE_STELEM_I4:
11278 case CEE_STELEM_I8:
11279 case CEE_STELEM_R4:
11280 case CEE_STELEM_R8:
11281 case CEE_STELEM_REF:
11286 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11288 if (*ip == CEE_STELEM) {
11290 token = read32 (ip + 1);
11291 klass = mini_get_class (method, token, generic_context);
11292 CHECK_TYPELOAD (klass);
11293 mono_class_init (klass);
11296 klass = array_access_to_klass (*ip);
11298 if (sp [0]->type != STACK_OBJ)
11301 emit_array_store (cfg, klass, sp, TRUE);
11303 if (*ip == CEE_STELEM)
11310 case CEE_CKFINITE: {
11314 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11315 ins->sreg1 = sp [0]->dreg;
11316 ins->dreg = alloc_freg (cfg);
11317 ins->type = STACK_R8;
11318 MONO_ADD_INS (bblock, ins);
11320 *sp++ = mono_decompose_opcode (cfg, ins);
11325 case CEE_REFANYVAL: {
11326 MonoInst *src_var, *src;
11328 int klass_reg = alloc_preg (cfg);
11329 int dreg = alloc_preg (cfg);
11331 GSHAREDVT_FAILURE (*ip);
11334 MONO_INST_NEW (cfg, ins, *ip);
11337 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11338 CHECK_TYPELOAD (klass);
11340 context_used = mini_class_check_context_used (cfg, klass);
11343 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11345 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11346 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11349 if (context_used) {
11350 MonoInst *klass_ins;
11352 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11353 klass, MONO_RGCTX_INFO_KLASS);
11356 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11357 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11359 mini_emit_class_check (cfg, klass_reg, klass);
11361 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11362 ins->type = STACK_MP;
11367 case CEE_MKREFANY: {
11368 MonoInst *loc, *addr;
11370 GSHAREDVT_FAILURE (*ip);
11373 MONO_INST_NEW (cfg, ins, *ip);
11376 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11377 CHECK_TYPELOAD (klass);
11379 context_used = mini_class_check_context_used (cfg, klass);
11381 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11382 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11384 if (context_used) {
11385 MonoInst *const_ins;
11386 int type_reg = alloc_preg (cfg);
11388 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11391 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11392 } else if (cfg->compile_aot) {
11393 int const_reg = alloc_preg (cfg);
11394 int type_reg = alloc_preg (cfg);
11396 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11397 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11398 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11399 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11401 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11402 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11406 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11407 ins->type = STACK_VTYPE;
11408 ins->klass = mono_defaults.typed_reference_class;
11413 case CEE_LDTOKEN: {
11415 MonoClass *handle_class;
11417 CHECK_STACK_OVF (1);
11420 n = read32 (ip + 1);
11422 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11423 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11424 handle = mono_method_get_wrapper_data (method, n);
11425 handle_class = mono_method_get_wrapper_data (method, n + 1);
11426 if (handle_class == mono_defaults.typehandle_class)
11427 handle = &((MonoClass*)handle)->byval_arg;
11430 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11435 mono_class_init (handle_class);
11436 if (cfg->generic_sharing_context) {
11437 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11438 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11439 /* This case handles ldtoken
11440 of an open type, like for
11443 } else if (handle_class == mono_defaults.typehandle_class) {
11444 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11445 } else if (handle_class == mono_defaults.fieldhandle_class)
11446 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11447 else if (handle_class == mono_defaults.methodhandle_class)
11448 context_used = mini_method_check_context_used (cfg, handle);
11450 g_assert_not_reached ();
11453 if ((cfg->opt & MONO_OPT_SHARED) &&
11454 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11455 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11456 MonoInst *addr, *vtvar, *iargs [3];
11457 int method_context_used;
11459 method_context_used = mini_method_check_context_used (cfg, method);
11461 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11463 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11464 EMIT_NEW_ICONST (cfg, iargs [1], n);
11465 if (method_context_used) {
11466 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11467 method, MONO_RGCTX_INFO_METHOD);
11468 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11470 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11471 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11473 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11475 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11477 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11479 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11480 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11481 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11482 (cmethod->klass == mono_defaults.systemtype_class) &&
11483 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11484 MonoClass *tclass = mono_class_from_mono_type (handle);
11486 mono_class_init (tclass);
11487 if (context_used) {
11488 ins = emit_get_rgctx_klass (cfg, context_used,
11489 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11490 } else if (cfg->compile_aot) {
11491 if (method->wrapper_type) {
11492 mono_error_init (&error); //got to do it since there are multiple conditionals below
11493 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11494 /* Special case for static synchronized wrappers */
11495 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11497 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11498 /* FIXME: n is not a normal token */
11500 EMIT_NEW_PCONST (cfg, ins, NULL);
11503 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11506 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11508 ins->type = STACK_OBJ;
11509 ins->klass = cmethod->klass;
11512 MonoInst *addr, *vtvar;
11514 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11516 if (context_used) {
11517 if (handle_class == mono_defaults.typehandle_class) {
11518 ins = emit_get_rgctx_klass (cfg, context_used,
11519 mono_class_from_mono_type (handle),
11520 MONO_RGCTX_INFO_TYPE);
11521 } else if (handle_class == mono_defaults.methodhandle_class) {
11522 ins = emit_get_rgctx_method (cfg, context_used,
11523 handle, MONO_RGCTX_INFO_METHOD);
11524 } else if (handle_class == mono_defaults.fieldhandle_class) {
11525 ins = emit_get_rgctx_field (cfg, context_used,
11526 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11528 g_assert_not_reached ();
11530 } else if (cfg->compile_aot) {
11531 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11533 EMIT_NEW_PCONST (cfg, ins, handle);
11535 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11536 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11537 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11547 MONO_INST_NEW (cfg, ins, OP_THROW);
11549 ins->sreg1 = sp [0]->dreg;
11551 bblock->out_of_line = TRUE;
11552 MONO_ADD_INS (bblock, ins);
11553 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11554 MONO_ADD_INS (bblock, ins);
11557 link_bblock (cfg, bblock, end_bblock);
11558 start_new_bblock = 1;
11560 case CEE_ENDFINALLY:
11561 /* mono_save_seq_point_info () depends on this */
11562 if (sp != stack_start)
11563 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11564 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11565 MONO_ADD_INS (bblock, ins);
11567 start_new_bblock = 1;
11570 * Control will leave the method so empty the stack, otherwise
11571 * the next basic block will start with a nonempty stack.
11573 while (sp != stack_start) {
11578 case CEE_LEAVE_S: {
11581 if (*ip == CEE_LEAVE) {
11583 target = ip + 5 + (gint32)read32(ip + 1);
11586 target = ip + 2 + (signed char)(ip [1]);
11589 /* empty the stack */
11590 while (sp != stack_start) {
11595 * If this leave statement is in a catch block, check for a
11596 * pending exception, and rethrow it if necessary.
11597 * We avoid doing this in runtime invoke wrappers, since those are called
11598 * by native code which expects the wrapper to catch all exceptions.
11600 for (i = 0; i < header->num_clauses; ++i) {
11601 MonoExceptionClause *clause = &header->clauses [i];
11604 * Use <= in the final comparison to handle clauses with multiple
11605 * leave statements, like in bug #78024.
11606 * The ordering of the exception clauses guarantees that we find the
11607 * innermost clause.
11609 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11611 MonoBasicBlock *dont_throw;
11616 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11619 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11621 NEW_BBLOCK (cfg, dont_throw);
11624 * Currently, we always rethrow the abort exception, despite the
11625 * fact that this is not correct. See thread6.cs for an example.
11626 * But propagating the abort exception is more important than
11627 * getting the semantics right.
11629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11631 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11633 MONO_START_BB (cfg, dont_throw);
11638 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11640 MonoExceptionClause *clause;
11642 for (tmp = handlers; tmp; tmp = tmp->next) {
11643 clause = tmp->data;
11644 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11646 link_bblock (cfg, bblock, tblock);
11647 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11648 ins->inst_target_bb = tblock;
11649 ins->inst_eh_block = clause;
11650 MONO_ADD_INS (bblock, ins);
11651 bblock->has_call_handler = 1;
11652 if (COMPILE_LLVM (cfg)) {
11653 MonoBasicBlock *target_bb;
11656 * Link the finally bblock with the target, since it will
11657 * conceptually branch there.
11658 * FIXME: Have to link the bblock containing the endfinally.
11660 GET_BBLOCK (cfg, target_bb, target);
11661 link_bblock (cfg, tblock, target_bb);
11664 g_list_free (handlers);
11667 MONO_INST_NEW (cfg, ins, OP_BR);
11668 MONO_ADD_INS (bblock, ins);
11669 GET_BBLOCK (cfg, tblock, target);
11670 link_bblock (cfg, bblock, tblock);
11671 ins->inst_target_bb = tblock;
11672 start_new_bblock = 1;
11674 if (*ip == CEE_LEAVE)
11683 * Mono specific opcodes
11685 case MONO_CUSTOM_PREFIX: {
11687 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11691 case CEE_MONO_ICALL: {
11693 MonoJitICallInfo *info;
11695 token = read32 (ip + 2);
11696 func = mono_method_get_wrapper_data (method, token);
11697 info = mono_find_jit_icall_by_addr (func);
11699 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11702 CHECK_STACK (info->sig->param_count);
11703 sp -= info->sig->param_count;
11705 ins = mono_emit_jit_icall (cfg, info->func, sp);
11706 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11710 inline_costs += 10 * num_calls++;
11714 case CEE_MONO_LDPTR: {
11717 CHECK_STACK_OVF (1);
11719 token = read32 (ip + 2);
11721 ptr = mono_method_get_wrapper_data (method, token);
11722 /* FIXME: Generalize this */
11723 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11724 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11729 EMIT_NEW_PCONST (cfg, ins, ptr);
11732 inline_costs += 10 * num_calls++;
11733 /* Can't embed random pointers into AOT code */
11737 case CEE_MONO_JIT_ICALL_ADDR: {
11738 MonoJitICallInfo *callinfo;
11741 CHECK_STACK_OVF (1);
11743 token = read32 (ip + 2);
11745 ptr = mono_method_get_wrapper_data (method, token);
11746 callinfo = mono_find_jit_icall_by_addr (ptr);
11747 g_assert (callinfo);
11748 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11751 inline_costs += 10 * num_calls++;
11754 case CEE_MONO_ICALL_ADDR: {
11755 MonoMethod *cmethod;
11758 CHECK_STACK_OVF (1);
11760 token = read32 (ip + 2);
11762 cmethod = mono_method_get_wrapper_data (method, token);
11764 if (cfg->compile_aot) {
11765 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11767 ptr = mono_lookup_internal_call (cmethod);
11769 EMIT_NEW_PCONST (cfg, ins, ptr);
11775 case CEE_MONO_VTADDR: {
11776 MonoInst *src_var, *src;
11782 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11783 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11788 case CEE_MONO_NEWOBJ: {
11789 MonoInst *iargs [2];
11791 CHECK_STACK_OVF (1);
11793 token = read32 (ip + 2);
11794 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11795 mono_class_init (klass);
11796 NEW_DOMAINCONST (cfg, iargs [0]);
11797 MONO_ADD_INS (cfg->cbb, iargs [0]);
11798 NEW_CLASSCONST (cfg, iargs [1], klass);
11799 MONO_ADD_INS (cfg->cbb, iargs [1]);
11800 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11802 inline_costs += 10 * num_calls++;
11805 case CEE_MONO_OBJADDR:
11808 MONO_INST_NEW (cfg, ins, OP_MOVE);
11809 ins->dreg = alloc_ireg_mp (cfg);
11810 ins->sreg1 = sp [0]->dreg;
11811 ins->type = STACK_MP;
11812 MONO_ADD_INS (cfg->cbb, ins);
11816 case CEE_MONO_LDNATIVEOBJ:
11818 * Similar to LDOBJ, but instead load the unmanaged
11819 * representation of the vtype to the stack.
11824 token = read32 (ip + 2);
11825 klass = mono_method_get_wrapper_data (method, token);
11826 g_assert (klass->valuetype);
11827 mono_class_init (klass);
11830 MonoInst *src, *dest, *temp;
11833 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11834 temp->backend.is_pinvoke = 1;
11835 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11836 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11838 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11839 dest->type = STACK_VTYPE;
11840 dest->klass = klass;
11846 case CEE_MONO_RETOBJ: {
11848 * Same as RET, but return the native representation of a vtype
11851 g_assert (cfg->ret);
11852 g_assert (mono_method_signature (method)->pinvoke);
11857 token = read32 (ip + 2);
11858 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11860 if (!cfg->vret_addr) {
11861 g_assert (cfg->ret_var_is_local);
11863 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11865 EMIT_NEW_RETLOADA (cfg, ins);
11867 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11869 if (sp != stack_start)
11872 MONO_INST_NEW (cfg, ins, OP_BR);
11873 ins->inst_target_bb = end_bblock;
11874 MONO_ADD_INS (bblock, ins);
11875 link_bblock (cfg, bblock, end_bblock);
11876 start_new_bblock = 1;
11880 case CEE_MONO_CISINST:
11881 case CEE_MONO_CCASTCLASS: {
11886 token = read32 (ip + 2);
11887 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11888 if (ip [1] == CEE_MONO_CISINST)
11889 ins = handle_cisinst (cfg, klass, sp [0]);
11891 ins = handle_ccastclass (cfg, klass, sp [0]);
11897 case CEE_MONO_SAVE_LMF:
11898 case CEE_MONO_RESTORE_LMF:
11899 #ifdef MONO_ARCH_HAVE_LMF_OPS
11900 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11901 MONO_ADD_INS (bblock, ins);
11902 cfg->need_lmf_area = TRUE;
11906 case CEE_MONO_CLASSCONST:
11907 CHECK_STACK_OVF (1);
11909 token = read32 (ip + 2);
11910 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11913 inline_costs += 10 * num_calls++;
11915 case CEE_MONO_NOT_TAKEN:
11916 bblock->out_of_line = TRUE;
11919 case CEE_MONO_TLS: {
11922 CHECK_STACK_OVF (1);
11924 key = (gint32)read32 (ip + 2);
11925 g_assert (key < TLS_KEY_NUM);
11927 ins = mono_create_tls_get (cfg, key);
11929 if (cfg->compile_aot) {
11931 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11932 ins->dreg = alloc_preg (cfg);
11933 ins->type = STACK_PTR;
11935 g_assert_not_reached ();
11938 ins->type = STACK_PTR;
11939 MONO_ADD_INS (bblock, ins);
11944 case CEE_MONO_DYN_CALL: {
11945 MonoCallInst *call;
11947 /* It would be easier to call a trampoline, but that would put an
11948 * extra frame on the stack, confusing exception handling. So
11949 * implement it inline using an opcode for now.
11952 if (!cfg->dyn_call_var) {
11953 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11954 /* prevent it from being register allocated */
11955 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11958 /* Has to use a call inst since the local regalloc expects it */
11959 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11960 ins = (MonoInst*)call;
11962 ins->sreg1 = sp [0]->dreg;
11963 ins->sreg2 = sp [1]->dreg;
11964 MONO_ADD_INS (bblock, ins);
11966 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11969 inline_costs += 10 * num_calls++;
11973 case CEE_MONO_MEMORY_BARRIER: {
11975 emit_memory_barrier (cfg, (int)read32 (ip + 2));
11979 case CEE_MONO_JIT_ATTACH: {
11980 MonoInst *args [16], *domain_ins;
11981 MonoInst *ad_ins, *jit_tls_ins;
11982 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11984 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11986 EMIT_NEW_PCONST (cfg, ins, NULL);
11987 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11989 ad_ins = mono_get_domain_intrinsic (cfg);
11990 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11992 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11993 NEW_BBLOCK (cfg, next_bb);
11994 NEW_BBLOCK (cfg, call_bb);
11996 if (cfg->compile_aot) {
11997 /* AOT code is only used in the root domain */
11998 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12000 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12002 MONO_ADD_INS (cfg->cbb, ad_ins);
12003 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12004 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12006 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12010 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12011 MONO_START_BB (cfg, call_bb);
12014 if (cfg->compile_aot) {
12015 /* AOT code is only used in the root domain */
12016 EMIT_NEW_PCONST (cfg, args [0], NULL);
12018 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12020 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12021 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12024 MONO_START_BB (cfg, next_bb);
12030 case CEE_MONO_JIT_DETACH: {
12031 MonoInst *args [16];
12033 /* Restore the original domain */
12034 dreg = alloc_ireg (cfg);
12035 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12036 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12041 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12047 case CEE_PREFIX1: {
12050 case CEE_ARGLIST: {
12051 /* somewhat similar to LDTOKEN */
12052 MonoInst *addr, *vtvar;
12053 CHECK_STACK_OVF (1);
12054 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12056 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12057 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12059 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12060 ins->type = STACK_VTYPE;
12061 ins->klass = mono_defaults.argumenthandle_class;
12074 * The following transforms:
12075 * CEE_CEQ into OP_CEQ
12076 * CEE_CGT into OP_CGT
12077 * CEE_CGT_UN into OP_CGT_UN
12078 * CEE_CLT into OP_CLT
12079 * CEE_CLT_UN into OP_CLT_UN
12081 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12083 MONO_INST_NEW (cfg, ins, cmp->opcode);
12085 cmp->sreg1 = sp [0]->dreg;
12086 cmp->sreg2 = sp [1]->dreg;
12087 type_from_op (cmp, sp [0], sp [1]);
12089 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
12090 cmp->opcode = OP_LCOMPARE;
12091 else if (sp [0]->type == STACK_R8)
12092 cmp->opcode = OP_FCOMPARE;
12094 cmp->opcode = OP_ICOMPARE;
12095 MONO_ADD_INS (bblock, cmp);
12096 ins->type = STACK_I4;
12097 ins->dreg = alloc_dreg (cfg, ins->type);
12098 type_from_op (ins, sp [0], sp [1]);
12100 if (cmp->opcode == OP_FCOMPARE) {
12102 * The backends expect the fceq opcodes to do the
12105 ins->sreg1 = cmp->sreg1;
12106 ins->sreg2 = cmp->sreg2;
12109 MONO_ADD_INS (bblock, ins);
12115 MonoInst *argconst;
12116 MonoMethod *cil_method;
12118 CHECK_STACK_OVF (1);
12120 n = read32 (ip + 2);
12121 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12122 if (!cmethod || mono_loader_get_last_error ())
12124 mono_class_init (cmethod->klass);
12126 mono_save_token_info (cfg, image, n, cmethod);
12128 context_used = mini_method_check_context_used (cfg, cmethod);
12130 cil_method = cmethod;
12131 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12132 METHOD_ACCESS_FAILURE (method, cil_method);
12134 if (mono_security_cas_enabled ()) {
12135 if (check_linkdemand (cfg, method, cmethod))
12136 INLINE_FAILURE ("linkdemand");
12137 CHECK_CFG_EXCEPTION;
12138 } else if (mono_security_core_clr_enabled ()) {
12139 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12143 * Optimize the common case of ldftn+delegate creation
12145 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12146 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12147 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12148 MonoInst *target_ins, *handle_ins;
12149 MonoMethod *invoke;
12150 int invoke_context_used;
12152 invoke = mono_get_delegate_invoke (ctor_method->klass);
12153 if (!invoke || !mono_method_signature (invoke))
12156 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12158 target_ins = sp [-1];
12160 if (mono_security_core_clr_enabled ())
12161 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12163 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12164 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12165 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12166 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12167 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12171 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12172 /* FIXME: SGEN support */
12173 if (invoke_context_used == 0) {
12175 if (cfg->verbose_level > 3)
12176 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12177 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12180 CHECK_CFG_EXCEPTION;
12191 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12192 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12196 inline_costs += 10 * num_calls++;
12199 case CEE_LDVIRTFTN: {
12200 MonoInst *args [2];
12204 n = read32 (ip + 2);
12205 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12206 if (!cmethod || mono_loader_get_last_error ())
12208 mono_class_init (cmethod->klass);
12210 context_used = mini_method_check_context_used (cfg, cmethod);
12212 if (mono_security_cas_enabled ()) {
12213 if (check_linkdemand (cfg, method, cmethod))
12214 INLINE_FAILURE ("linkdemand");
12215 CHECK_CFG_EXCEPTION;
12216 } else if (mono_security_core_clr_enabled ()) {
12217 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12221 * Optimize the common case of ldvirtftn+delegate creation
12223 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12224 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12225 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12226 MonoInst *target_ins, *handle_ins;
12227 MonoMethod *invoke;
12228 int invoke_context_used;
12230 invoke = mono_get_delegate_invoke (ctor_method->klass);
12231 if (!invoke || !mono_method_signature (invoke))
12234 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12236 target_ins = sp [-1];
12238 if (mono_security_core_clr_enabled ())
12239 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12241 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12242 /* FIXME: SGEN support */
12243 if (invoke_context_used == 0) {
12245 if (cfg->verbose_level > 3)
12246 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12247 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12250 CHECK_CFG_EXCEPTION;
12264 args [1] = emit_get_rgctx_method (cfg, context_used,
12265 cmethod, MONO_RGCTX_INFO_METHOD);
12268 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12270 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12273 inline_costs += 10 * num_calls++;
12277 CHECK_STACK_OVF (1);
12279 n = read16 (ip + 2);
12281 EMIT_NEW_ARGLOAD (cfg, ins, n);
12286 CHECK_STACK_OVF (1);
12288 n = read16 (ip + 2);
12290 NEW_ARGLOADA (cfg, ins, n);
12291 MONO_ADD_INS (cfg->cbb, ins);
12299 n = read16 (ip + 2);
12301 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12303 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12307 CHECK_STACK_OVF (1);
12309 n = read16 (ip + 2);
12311 EMIT_NEW_LOCLOAD (cfg, ins, n);
12316 unsigned char *tmp_ip;
12317 CHECK_STACK_OVF (1);
12319 n = read16 (ip + 2);
12322 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12328 EMIT_NEW_LOCLOADA (cfg, ins, n);
12337 n = read16 (ip + 2);
12339 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12341 emit_stloc_ir (cfg, sp, header, n);
12348 if (sp != stack_start)
12350 if (cfg->method != method)
12352 * Inlining this into a loop in a parent could lead to
12353 * stack overflows which is different behavior than the
12354 * non-inlined case, thus disable inlining in this case.
12356 INLINE_FAILURE("localloc");
12358 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12359 ins->dreg = alloc_preg (cfg);
12360 ins->sreg1 = sp [0]->dreg;
12361 ins->type = STACK_PTR;
12362 MONO_ADD_INS (cfg->cbb, ins);
12364 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12366 ins->flags |= MONO_INST_INIT;
12371 case CEE_ENDFILTER: {
12372 MonoExceptionClause *clause, *nearest;
12373 int cc, nearest_num;
12377 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12379 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12380 ins->sreg1 = (*sp)->dreg;
12381 MONO_ADD_INS (bblock, ins);
12382 start_new_bblock = 1;
12387 for (cc = 0; cc < header->num_clauses; ++cc) {
12388 clause = &header->clauses [cc];
12389 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12390 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12391 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12396 g_assert (nearest);
12397 if ((ip - header->code) != nearest->handler_offset)
12402 case CEE_UNALIGNED_:
12403 ins_flag |= MONO_INST_UNALIGNED;
12404 /* FIXME: record alignment? we can assume 1 for now */
12408 case CEE_VOLATILE_:
12409 ins_flag |= MONO_INST_VOLATILE;
12413 ins_flag |= MONO_INST_TAILCALL;
12414 cfg->flags |= MONO_CFG_HAS_TAIL;
12415 /* Can't inline tail calls at this time */
12416 inline_costs += 100000;
12423 token = read32 (ip + 2);
12424 klass = mini_get_class (method, token, generic_context);
12425 CHECK_TYPELOAD (klass);
12426 if (generic_class_is_reference_type (cfg, klass))
12427 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12429 mini_emit_initobj (cfg, *sp, NULL, klass);
12433 case CEE_CONSTRAINED_:
12435 token = read32 (ip + 2);
12436 constrained_call = mini_get_class (method, token, generic_context);
12437 CHECK_TYPELOAD (constrained_call);
12441 case CEE_INITBLK: {
12442 MonoInst *iargs [3];
12446 /* Skip optimized paths for volatile operations. */
12447 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12448 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12449 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12450 /* emit_memset only works when val == 0 */
12451 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12454 iargs [0] = sp [0];
12455 iargs [1] = sp [1];
12456 iargs [2] = sp [2];
12457 if (ip [1] == CEE_CPBLK) {
12459 * FIXME: It's unclear whether we should be emitting both the acquire
12460 * and release barriers for cpblk. It is technically both a load and
12461 * store operation, so it seems like that's the sensible thing to do.
12463 * FIXME: We emit full barriers on both sides of the operation for
12464 * simplicity. We should have a separate atomic memcpy method instead.
12466 MonoMethod *memcpy_method = get_memcpy_method ();
12468 if (ins_flag & MONO_INST_VOLATILE)
12469 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12471 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12472 call->flags |= ins_flag;
12474 if (ins_flag & MONO_INST_VOLATILE)
12475 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12477 MonoMethod *memset_method = get_memset_method ();
12478 if (ins_flag & MONO_INST_VOLATILE) {
12479 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12480 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12482 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12483 call->flags |= ins_flag;
12494 ins_flag |= MONO_INST_NOTYPECHECK;
12496 ins_flag |= MONO_INST_NORANGECHECK;
12497 /* we ignore the no-nullcheck for now since we
12498 * really do it explicitly only when doing callvirt->call
12502 case CEE_RETHROW: {
12504 int handler_offset = -1;
12506 for (i = 0; i < header->num_clauses; ++i) {
12507 MonoExceptionClause *clause = &header->clauses [i];
12508 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12509 handler_offset = clause->handler_offset;
12514 bblock->flags |= BB_EXCEPTION_UNSAFE;
12516 if (handler_offset == -1)
12519 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12520 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12521 ins->sreg1 = load->dreg;
12522 MONO_ADD_INS (bblock, ins);
12524 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12525 MONO_ADD_INS (bblock, ins);
12528 link_bblock (cfg, bblock, end_bblock);
12529 start_new_bblock = 1;
12537 CHECK_STACK_OVF (1);
12539 token = read32 (ip + 2);
12540 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12541 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12544 val = mono_type_size (type, &ialign);
12546 MonoClass *klass = mini_get_class (method, token, generic_context);
12547 CHECK_TYPELOAD (klass);
12549 val = mono_type_size (&klass->byval_arg, &ialign);
12551 if (mini_is_gsharedvt_klass (cfg, klass))
12552 GSHAREDVT_FAILURE (*ip);
12554 EMIT_NEW_ICONST (cfg, ins, val);
12559 case CEE_REFANYTYPE: {
12560 MonoInst *src_var, *src;
12562 GSHAREDVT_FAILURE (*ip);
12568 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12570 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12571 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12572 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12577 case CEE_READONLY_:
12590 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12600 g_warning ("opcode 0x%02x not handled", *ip);
12604 if (start_new_bblock != 1)
12607 bblock->cil_length = ip - bblock->cil_code;
12608 if (bblock->next_bb) {
12609 /* This could already be set because of inlining, #693905 */
12610 MonoBasicBlock *bb = bblock;
12612 while (bb->next_bb)
12614 bb->next_bb = end_bblock;
12616 bblock->next_bb = end_bblock;
12619 if (cfg->method == method && cfg->domainvar) {
12621 MonoInst *get_domain;
12623 cfg->cbb = init_localsbb;
12625 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12626 MONO_ADD_INS (cfg->cbb, get_domain);
12628 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12630 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12631 MONO_ADD_INS (cfg->cbb, store);
12634 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12635 if (cfg->compile_aot)
12636 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12637 mono_get_got_var (cfg);
12640 if (cfg->method == method && cfg->got_var)
12641 mono_emit_load_got_addr (cfg);
12643 if (init_localsbb) {
12644 cfg->cbb = init_localsbb;
12646 for (i = 0; i < header->num_locals; ++i) {
12647 emit_init_local (cfg, i, header->locals [i], init_locals);
12651 if (cfg->init_ref_vars && cfg->method == method) {
12652 /* Emit initialization for ref vars */
12653 // FIXME: Avoid duplication initialization for IL locals.
12654 for (i = 0; i < cfg->num_varinfo; ++i) {
12655 MonoInst *ins = cfg->varinfo [i];
12657 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12658 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12662 if (cfg->lmf_var && cfg->method == method) {
12663 cfg->cbb = init_localsbb;
12664 emit_push_lmf (cfg);
12667 cfg->cbb = init_localsbb;
12668 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12671 MonoBasicBlock *bb;
12674 * Make seq points at backward branch targets interruptable.
12676 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12677 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12678 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12681 /* Add a sequence point for method entry/exit events */
12682 if (cfg->gen_seq_points_debug_data) {
12683 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12684 MONO_ADD_INS (init_localsbb, ins);
12685 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12686 MONO_ADD_INS (cfg->bb_exit, ins);
12690 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12691 * the code they refer to was dead (#11880).
12693 if (sym_seq_points) {
12694 for (i = 0; i < header->code_size; ++i) {
12695 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12698 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12699 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12706 if (cfg->method == method) {
12707 MonoBasicBlock *bb;
12708 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12709 bb->region = mono_find_block_region (cfg, bb->real_offset);
12711 mono_create_spvar_for_region (cfg, bb->region);
12712 if (cfg->verbose_level > 2)
12713 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12717 if (inline_costs < 0) {
12720 /* Method is too large */
12721 mname = mono_method_full_name (method, TRUE);
12722 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12723 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12727 if ((cfg->verbose_level > 2) && (cfg->method == method))
12728 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12733 g_assert (!mono_error_ok (&cfg->error));
12737 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12741 set_exception_type_from_invalid_il (cfg, method, ip);
12745 g_slist_free (class_inits);
12746 mono_basic_block_free (original_bb);
12747 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12748 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12749 if (cfg->exception_type)
12752 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM counterpart,
 * used when the value being stored is a known constant (see the
 * OP_ICONST fusion in mono_spill_global_vars). Asserts on any store
 * opcode that has no immediate form.
 * NOTE(review): this listing appears to be missing lines (no switch
 * header/default visible) — confirm against the full source.
 */
12756 store_membase_reg_to_store_membase_imm (int opcode)
12759 case OP_STORE_MEMBASE_REG:
12760 return OP_STORE_MEMBASE_IMM;
12761 case OP_STOREI1_MEMBASE_REG:
12762 return OP_STOREI1_MEMBASE_IMM;
12763 case OP_STOREI2_MEMBASE_REG:
12764 return OP_STOREI2_MEMBASE_IMM;
12765 case OP_STOREI4_MEMBASE_REG:
12766 return OP_STOREI4_MEMBASE_IMM;
12767 case OP_STOREI8_MEMBASE_REG:
12768 return OP_STOREI8_MEMBASE_IMM;
12770 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to its register-immediate form
 * (e.g. an int add to OP_IADD_IMM), so a constant operand can be
 * folded into the instruction. Covers int/long ALU and shift ops,
 * compares, stores, and a few x86/amd64-specific opcodes.
 * Presumably returns -1 for opcodes with no immediate form — the
 * default branch is not visible in this listing; confirm.
 */
12777 mono_op_to_op_imm (int opcode)
12781 return OP_IADD_IMM;
12783 return OP_ISUB_IMM;
12785 return OP_IDIV_IMM;
12787 return OP_IDIV_UN_IMM;
12789 return OP_IREM_IMM;
12791 return OP_IREM_UN_IMM;
12793 return OP_IMUL_IMM;
12795 return OP_IAND_IMM;
12799 return OP_IXOR_IMM;
12801 return OP_ISHL_IMM;
12803 return OP_ISHR_IMM;
12805 return OP_ISHR_UN_IMM;
12808 return OP_LADD_IMM;
12810 return OP_LSUB_IMM;
12812 return OP_LAND_IMM;
12816 return OP_LXOR_IMM;
12818 return OP_LSHL_IMM;
12820 return OP_LSHR_IMM;
12822 return OP_LSHR_UN_IMM;
/* long rem-by-immediate only exists natively on 64-bit registers */
12823 #if SIZEOF_REGISTER == 8
12825 return OP_LREM_IMM;
12829 return OP_COMPARE_IMM;
12831 return OP_ICOMPARE_IMM;
12833 return OP_LCOMPARE_IMM;
12835 case OP_STORE_MEMBASE_REG:
12836 return OP_STORE_MEMBASE_IMM;
12837 case OP_STOREI1_MEMBASE_REG:
12838 return OP_STOREI1_MEMBASE_IMM;
12839 case OP_STOREI2_MEMBASE_REG:
12840 return OP_STOREI2_MEMBASE_IMM;
12841 case OP_STOREI4_MEMBASE_REG:
12842 return OP_STOREI4_MEMBASE_IMM;
/* architecture-specific immediate forms */
12844 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12846 return OP_X86_PUSH_IMM;
12847 case OP_X86_COMPARE_MEMBASE_REG:
12848 return OP_X86_COMPARE_MEMBASE_IMM;
12850 #if defined(TARGET_AMD64)
12851 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12852 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12854 case OP_VOIDCALL_REG:
12855 return OP_VOIDCALL;
12863 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* (load indirect) opcode to the corresponding
 * load-from-membase IR opcode. LDIND_I and LDIND_REF both map to the
 * pointer-sized OP_LOAD_MEMBASE. Asserts on unknown opcodes.
 */
12870 ldind_to_load_membase (int opcode)
12874 return OP_LOADI1_MEMBASE;
12876 return OP_LOADU1_MEMBASE;
12878 return OP_LOADI2_MEMBASE;
12880 return OP_LOADU2_MEMBASE;
12882 return OP_LOADI4_MEMBASE;
12884 return OP_LOADU4_MEMBASE;
12886 return OP_LOAD_MEMBASE;
12887 case CEE_LDIND_REF:
12888 return OP_LOAD_MEMBASE;
12890 return OP_LOADI8_MEMBASE;
12892 return OP_LOADR4_MEMBASE;
12894 return OP_LOADR8_MEMBASE;
12896 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* (store indirect) opcode to the corresponding
 * store-to-membase IR opcode; STIND_REF maps to the pointer-sized
 * OP_STORE_MEMBASE_REG. Asserts on unknown opcodes.
 */
12903 stind_to_store_membase (int opcode)
12907 return OP_STOREI1_MEMBASE_REG;
12909 return OP_STOREI2_MEMBASE_REG;
12911 return OP_STOREI4_MEMBASE_REG;
12913 case CEE_STIND_REF:
12914 return OP_STORE_MEMBASE_REG;
12916 return OP_STOREI8_MEMBASE_REG;
12918 return OP_STORER4_MEMBASE_REG;
12920 return OP_STORER8_MEMBASE_REG;
12922 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-base+offset opcode to a load-from-absolute-address
 * (*_MEM) opcode, for targets that support absolute addressing.
 * Currently only x86/amd64; other targets fall through (presumably
 * returning -1 — the fallthrough return is not visible here; confirm).
 */
12929 mono_load_membase_to_load_mem (int opcode)
12931 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12932 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12934 case OP_LOAD_MEMBASE:
12935 return OP_LOAD_MEM;
12936 case OP_LOADU1_MEMBASE:
12937 return OP_LOADU1_MEM;
12938 case OP_LOADU2_MEMBASE:
12939 return OP_LOADU2_MEM;
12940 case OP_LOADI4_MEMBASE:
12941 return OP_LOADI4_MEM;
12942 case OP_LOADU4_MEMBASE:
12943 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist with 64-bit registers */
12944 #if SIZEOF_REGISTER == 8
12945 case OP_LOADI8_MEMBASE:
12946 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination would be spilled with
 * STORE_OPCODE, return a read-modify-write *_MEMBASE variant that
 * operates directly on the stack slot (x86/amd64 only), fusing the
 * load+op+store into one instruction. The store opcode gates which
 * operand widths are legal on each target. Returns -1 when no such
 * fused form exists (the -1 returns are not visible in this listing;
 * confirm against the full source).
 */
12955 op_to_op_dest_membase (int store_opcode, int opcode)
12957 #if defined(TARGET_X86)
/* on x86 only pointer-sized / 4-byte stores can be fused */
12958 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12963 return OP_X86_ADD_MEMBASE_REG;
12965 return OP_X86_SUB_MEMBASE_REG;
12967 return OP_X86_AND_MEMBASE_REG;
12969 return OP_X86_OR_MEMBASE_REG;
12971 return OP_X86_XOR_MEMBASE_REG;
12974 return OP_X86_ADD_MEMBASE_IMM;
12977 return OP_X86_SUB_MEMBASE_IMM;
12980 return OP_X86_AND_MEMBASE_IMM;
12983 return OP_X86_OR_MEMBASE_IMM;
12986 return OP_X86_XOR_MEMBASE_IMM;
12992 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte stores to be fused */
12993 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12998 return OP_X86_ADD_MEMBASE_REG;
13000 return OP_X86_SUB_MEMBASE_REG;
13002 return OP_X86_AND_MEMBASE_REG;
13004 return OP_X86_OR_MEMBASE_REG;
13006 return OP_X86_XOR_MEMBASE_REG;
13008 return OP_X86_ADD_MEMBASE_IMM;
13010 return OP_X86_SUB_MEMBASE_IMM;
13012 return OP_X86_AND_MEMBASE_IMM;
13014 return OP_X86_OR_MEMBASE_IMM;
13016 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
13018 return OP_AMD64_ADD_MEMBASE_REG;
13020 return OP_AMD64_SUB_MEMBASE_REG;
13022 return OP_AMD64_AND_MEMBASE_REG;
13024 return OP_AMD64_OR_MEMBASE_REG;
13026 return OP_AMD64_XOR_MEMBASE_REG;
13029 return OP_AMD64_ADD_MEMBASE_IMM;
13032 return OP_AMD64_SUB_MEMBASE_IMM;
13035 return OP_AMD64_AND_MEMBASE_IMM;
13038 return OP_AMD64_OR_MEMBASE_IMM;
13041 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with the 1-byte store of its
 * result, producing an x86 SETcc-to-memory instruction (x86/amd64
 * only). Returns -1 when no fused form applies (fallthrough return
 * not visible in this listing; confirm).
 */
13051 op_to_op_store_membase (int store_opcode, int opcode)
13053 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13056 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13057 return OP_X86_SETEQ_MEMBASE;
13059 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13060 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand would be filled by
 * LOAD_OPCODE from a stack slot, return a *_MEMBASE variant that reads
 * the operand directly from memory (x86/amd64 only), eliminating the
 * separate load. Returns -1 when no memory-operand form exists (the
 * -1 returns are not visible in this listing; confirm).
 */
13068 op_to_op_src1_membase (int load_opcode, int opcode)
13071 /* FIXME: This has sign extension issues */
/* special case: 8-bit unsigned load + int compare-with-imm */
13073 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13074 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only pointer/4-byte loads can become memory operands */
13077 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13082 return OP_X86_PUSH_MEMBASE;
13083 case OP_COMPARE_IMM:
13084 case OP_ICOMPARE_IMM:
13085 return OP_X86_COMPARE_MEMBASE_IMM;
13088 return OP_X86_COMPARE_MEMBASE_REG;
13092 #ifdef TARGET_AMD64
13093 /* FIXME: This has sign extension issues */
13095 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13096 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32) OP_LOAD_MEMBASE is 4 bytes, OP_LOADI8_MEMBASE 8 */
13101 #ifdef __mono_ilp32__
13102 if (load_opcode == OP_LOADI8_MEMBASE)
13104 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13106 return OP_X86_PUSH_MEMBASE;
13108 /* FIXME: This only works for 32 bit immediates
13109 case OP_COMPARE_IMM:
13110 case OP_LCOMPARE_IMM:
13111 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13112 return OP_AMD64_COMPARE_MEMBASE_IMM;
13114 case OP_ICOMPARE_IMM:
13115 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13116 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13120 #ifdef __mono_ilp32__
13121 if (load_opcode == OP_LOAD_MEMBASE)
13122 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13123 if (load_opcode == OP_LOADI8_MEMBASE)
13125 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13127 return OP_AMD64_COMPARE_MEMBASE_REG;
13130 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13131 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same idea as op_to_op_src1_membase, but for the SECOND source
 * operand: return a REG_MEMBASE variant that reads sreg2 directly from
 * a stack slot (x86/amd64 only). Returns -1 when no such form exists
 * (the -1 returns are not visible in this listing; confirm).
 */
13140 op_to_op_src2_membase (int load_opcode, int opcode)
13143 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13149 return OP_X86_COMPARE_REG_MEMBASE;
13151 return OP_X86_ADD_REG_MEMBASE;
13153 return OP_X86_SUB_REG_MEMBASE;
13155 return OP_X86_AND_REG_MEMBASE;
13157 return OP_X86_OR_REG_MEMBASE;
13159 return OP_X86_XOR_REG_MEMBASE;
13163 #ifdef TARGET_AMD64
/* on x32, pointer-sized loads are 4 bytes and use the int forms */
13164 #ifdef __mono_ilp32__
13165 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13167 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13171 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13173 return OP_X86_ADD_REG_MEMBASE;
13175 return OP_X86_SUB_REG_MEMBASE;
13177 return OP_X86_AND_REG_MEMBASE;
13179 return OP_X86_OR_REG_MEMBASE;
13181 return OP_X86_XOR_REG_MEMBASE;
13183 #ifdef __mono_ilp32__
13184 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13186 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit operand widths use the AMD64 long forms */
13191 return OP_AMD64_COMPARE_REG_MEMBASE;
13193 return OP_AMD64_ADD_REG_MEMBASE;
13195 return OP_AMD64_SUB_REG_MEMBASE;
13197 return OP_AMD64_AND_REG_MEMBASE;
13199 return OP_AMD64_OR_REG_MEMBASE;
13201 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses to convert opcodes that this
 * target emulates in software (long shifts on 32-bit registers,
 * mul/div/rem under MONO_ARCH_EMULATE_*), since their immediate forms
 * would not exist. Everything else is delegated to mono_op_to_op_imm.
 * (The case labels excluded by each #if are not visible in this
 * listing; confirm against the full source.)
 */
13210 mono_op_to_op_imm_noemul (int opcode)
13213 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13219 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13226 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13231 return mono_op_to_op_imm (opcode);
13236 * mono_handle_global_vregs:
13238 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (as visible here):
 *   1) scan every instruction in every bblock, recording which bblock
 *      each vreg is used in (vreg_to_bb); a vreg seen in two different
 *      bblocks gets a MonoInst variable created for it ("made global")
 *      and its slot is flagged -1;
 *   2) variables used in only one bblock are converted back to local
 *      vregs (marked MONO_INST_IS_DEAD), with an AOT-only check that
 *      the live range does not cross a call;
 *   3) the varinfo/vars tables are compacted to drop dead entries.
 */
13242 mono_handle_global_vregs (MonoCompile *cfg)
13244 gint32 *vreg_to_bb;
13245 MonoBasicBlock *bb;
/*
 * NOTE(review): the element size is sizeof (gint32*) for a gint32
 * array (over-allocates on 64-bit), and "+ 1" binds after the
 * multiply, adding one byte rather than one element. Harmless but
 * worth confirming/cleaning up in the full source.
 */
13248 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13250 #ifdef MONO_ARCH_SIMD_INTRINSICS
13251 if (cfg->uses_simd_intrinsics)
13252 mono_simd_simplify_indirection (cfg);
13255 /* Find local vregs used in more than one bb */
13256 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13257 MonoInst *ins = bb->code;
13258 int block_num = bb->block_num;
13260 if (cfg->verbose_level > 2)
13261 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13264 for (; ins; ins = ins->next) {
13265 const char *spec = INS_INFO (ins->opcode);
13266 int regtype = 0, regindex;
13269 if (G_UNLIKELY (cfg->verbose_level > 2))
13270 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
13272 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn */
13274 for (regindex = 0; regindex < 4; regindex ++) {
13277 if (regindex == 0) {
13278 regtype = spec [MONO_INST_DEST];
13279 if (regtype == ' ')
13282 } else if (regindex == 1) {
13283 regtype = spec [MONO_INST_SRC1];
13284 if (regtype == ' ')
13287 } else if (regindex == 2) {
13288 regtype = spec [MONO_INST_SRC2];
13289 if (regtype == ' ')
13292 } else if (regindex == 3) {
13293 regtype = spec [MONO_INST_SRC3];
13294 if (regtype == ' ')
13299 #if SIZEOF_REGISTER == 4
13300 /* In the LLVM case, the long opcodes are not decomposed */
13301 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13303 * Since some instructions reference the original long vreg,
13304 * and some reference the two component vregs, it is quite hard
13305 * to determine when it needs to be global. So be conservative.
13307 if (!get_vreg_to_inst (cfg, vreg)) {
13308 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13310 if (cfg->verbose_level > 2)
13311 printf ("LONG VREG R%d made global.\n", vreg);
13315 * Make the component vregs volatile since the optimizations can
13316 * get confused otherwise.
13318 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13319 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13323 g_assert (vreg != -1);
13325 prev_bb = vreg_to_bb [vreg];
13326 if (prev_bb == 0) {
13327 /* 0 is a valid block num */
13328 vreg_to_bb [vreg] = block_num + 1;
13329 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are handled elsewhere; skip them */
13330 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13333 if (!get_vreg_to_inst (cfg, vreg)) {
13334 if (G_UNLIKELY (cfg->verbose_level > 2))
/* NOTE(review): prints the stored value (block_num + 1 or -1), not the raw block num */
13335 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13339 if (vreg_is_ref (cfg, vreg))
13340 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13342 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13345 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13348 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13351 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13354 g_assert_not_reached ();
13358 /* Flag as having been used in more than one bb */
13359 vreg_to_bb [vreg] = -1;
13365 /* If a variable is used in only one bblock, convert it into a local vreg */
13366 for (i = 0; i < cfg->num_varinfo; i++) {
13367 MonoInst *var = cfg->varinfo [i];
13368 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13370 switch (var->type) {
13376 #if SIZEOF_REGISTER == 8
13379 #if !defined(TARGET_X86)
13380 /* Enabling this screws up the fp stack on x86 */
13383 if (mono_arch_is_soft_float ())
13386 /* Arguments are implicitly global */
13387 /* Putting R4 vars into registers doesn't work currently */
13388 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13389 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13391 * Make that the variable's liveness interval doesn't contain a call, since
13392 * that would cause the lvreg to be spilled, making the whole optimization
13395 /* This is too slow for JIT compilation */
13397 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13399 int def_index, call_index, ins_index;
13400 gboolean spilled = FALSE;
13405 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13406 const char *spec = INS_INFO (ins->opcode);
13408 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13409 def_index = ins_index;
/*
 * NOTE(review): both clauses of this || test SRC1/sreg1 — the second
 * was almost certainly meant to test SRC2/sreg2, so uses through the
 * second source operand are never seen here. Confirm and fix upstream.
 */
13411 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13412 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13413 if (call_index > def_index) {
13419 if (MONO_IS_CALL (ins))
13420 call_index = ins_index;
13430 if (G_UNLIKELY (cfg->verbose_level > 2))
13431 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13432 var->flags |= MONO_INST_IS_DEAD;
13433 cfg->vreg_to_inst [var->dreg] = NULL;
13440 * Compress the varinfo and vars tables so the liveness computation is faster and
13441 * takes up less space.
13444 for (i = 0; i < cfg->num_varinfo; ++i) {
13445 MonoInst *var = cfg->varinfo [i];
13446 if (pos < i && cfg->locals_start == i)
13447 cfg->locals_start = pos;
13448 if (!(var->flags & MONO_INST_IS_DEAD)) {
13450 cfg->varinfo [pos] = cfg->varinfo [i];
13451 cfg->varinfo [pos]->inst_c0 = pos;
13452 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13453 cfg->vars [pos].idx = pos;
13454 #if SIZEOF_REGISTER == 4
13455 if (cfg->varinfo [pos]->type == STACK_I8) {
13456 /* Modify the two component vars too */
13459 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13460 var1->inst_c0 = pos;
13461 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13462 var1->inst_c0 = pos;
13469 cfg->num_varinfo = pos;
13470 if (cfg->locals_start > cfg->num_varinfo)
13471 cfg->locals_start = cfg->num_varinfo;
13475 * mono_spill_global_vars:
13477 * Generate spill code for variables which are not allocated to registers,
13478 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13479 * code is generated which could be optimized by the local optimization passes.
13482 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13484 MonoBasicBlock *bb;
13486 int orig_next_vreg;
13487 guint32 *vreg_to_lvreg;
13489 guint32 i, lvregs_len;
13490 gboolean dest_has_lvreg = FALSE;
13491 guint32 stacktypes [128];
13492 MonoInst **live_range_start, **live_range_end;
13493 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13494 int *gsharedvt_vreg_to_idx = NULL;
13496 *need_local_opts = FALSE;
13498 memset (spec2, 0, sizeof (spec2));
13500 /* FIXME: Move this function to mini.c */
13501 stacktypes ['i'] = STACK_PTR;
13502 stacktypes ['l'] = STACK_I8;
13503 stacktypes ['f'] = STACK_R8;
13504 #ifdef MONO_ARCH_SIMD_INTRINSICS
13505 stacktypes ['x'] = STACK_VTYPE;
13508 #if SIZEOF_REGISTER == 4
13509 /* Create MonoInsts for longs */
13510 for (i = 0; i < cfg->num_varinfo; i++) {
13511 MonoInst *ins = cfg->varinfo [i];
13513 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13514 switch (ins->type) {
13519 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13522 g_assert (ins->opcode == OP_REGOFFSET);
13524 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13526 tree->opcode = OP_REGOFFSET;
13527 tree->inst_basereg = ins->inst_basereg;
13528 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13530 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13532 tree->opcode = OP_REGOFFSET;
13533 tree->inst_basereg = ins->inst_basereg;
13534 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13544 if (cfg->compute_gc_maps) {
13545 /* registers need liveness info even for !non refs */
13546 for (i = 0; i < cfg->num_varinfo; i++) {
13547 MonoInst *ins = cfg->varinfo [i];
13549 if (ins->opcode == OP_REGVAR)
13550 ins->flags |= MONO_INST_GC_TRACK;
13554 if (cfg->gsharedvt) {
13555 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13557 for (i = 0; i < cfg->num_varinfo; ++i) {
13558 MonoInst *ins = cfg->varinfo [i];
13561 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13562 if (i >= cfg->locals_start) {
13564 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13565 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13566 ins->opcode = OP_GSHAREDVT_LOCAL;
13567 ins->inst_imm = idx;
13570 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13571 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13577 /* FIXME: widening and truncation */
13580 * As an optimization, when a variable allocated to the stack is first loaded into
13581 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13582 * the variable again.
13584 orig_next_vreg = cfg->next_vreg;
13585 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13586 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13590 * These arrays contain the first and last instructions accessing a given
13592 * Since we emit bblocks in the same order we process them here, and we
13593 * don't split live ranges, these will precisely describe the live range of
13594 * the variable, i.e. the instruction range where a valid value can be found
13595 * in the variables location.
13596 * The live range is computed using the liveness info computed by the liveness pass.
13597 * We can't use vmv->range, since that is an abstract live range, and we need
13598 * one which is instruction precise.
13599 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13601 /* FIXME: Only do this if debugging info is requested */
13602 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13603 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13604 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13605 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13607 /* Add spill loads/stores */
13608 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13611 if (cfg->verbose_level > 2)
13612 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13614 /* Clear vreg_to_lvreg array */
13615 for (i = 0; i < lvregs_len; i++)
13616 vreg_to_lvreg [lvregs [i]] = 0;
13620 MONO_BB_FOR_EACH_INS (bb, ins) {
13621 const char *spec = INS_INFO (ins->opcode);
13622 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13623 gboolean store, no_lvreg;
13624 int sregs [MONO_MAX_SRC_REGS];
13626 if (G_UNLIKELY (cfg->verbose_level > 2))
13627 mono_print_ins (ins);
13629 if (ins->opcode == OP_NOP)
13633 * We handle LDADDR here as well, since it can only be decomposed
13634 * when variable addresses are known.
13636 if (ins->opcode == OP_LDADDR) {
13637 MonoInst *var = ins->inst_p0;
13639 if (var->opcode == OP_VTARG_ADDR) {
13640 /* Happens on SPARC/S390 where vtypes are passed by reference */
13641 MonoInst *vtaddr = var->inst_left;
13642 if (vtaddr->opcode == OP_REGVAR) {
13643 ins->opcode = OP_MOVE;
13644 ins->sreg1 = vtaddr->dreg;
13646 else if (var->inst_left->opcode == OP_REGOFFSET) {
13647 ins->opcode = OP_LOAD_MEMBASE;
13648 ins->inst_basereg = vtaddr->inst_basereg;
13649 ins->inst_offset = vtaddr->inst_offset;
13652 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13653 /* gsharedvt arg passed by ref */
13654 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13656 ins->opcode = OP_LOAD_MEMBASE;
13657 ins->inst_basereg = var->inst_basereg;
13658 ins->inst_offset = var->inst_offset;
13659 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13660 MonoInst *load, *load2, *load3;
13661 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13662 int reg1, reg2, reg3;
13663 MonoInst *info_var = cfg->gsharedvt_info_var;
13664 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13668 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13671 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13673 g_assert (info_var);
13674 g_assert (locals_var);
13676 /* Mark the instruction used to compute the locals var as used */
13677 cfg->gsharedvt_locals_var_ins = NULL;
13679 /* Load the offset */
13680 if (info_var->opcode == OP_REGOFFSET) {
13681 reg1 = alloc_ireg (cfg);
13682 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13683 } else if (info_var->opcode == OP_REGVAR) {
13685 reg1 = info_var->dreg;
13687 g_assert_not_reached ();
13689 reg2 = alloc_ireg (cfg);
13690 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13691 /* Load the locals area address */
13692 reg3 = alloc_ireg (cfg);
13693 if (locals_var->opcode == OP_REGOFFSET) {
13694 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13695 } else if (locals_var->opcode == OP_REGVAR) {
13696 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13698 g_assert_not_reached ();
13700 /* Compute the address */
13701 ins->opcode = OP_PADD;
13705 mono_bblock_insert_before_ins (bb, ins, load3);
13706 mono_bblock_insert_before_ins (bb, load3, load2);
13708 mono_bblock_insert_before_ins (bb, load2, load);
13710 g_assert (var->opcode == OP_REGOFFSET);
13712 ins->opcode = OP_ADD_IMM;
13713 ins->sreg1 = var->inst_basereg;
13714 ins->inst_imm = var->inst_offset;
13717 *need_local_opts = TRUE;
13718 spec = INS_INFO (ins->opcode);
13721 if (ins->opcode < MONO_CEE_LAST) {
13722 mono_print_ins (ins);
13723 g_assert_not_reached ();
13727 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13731 if (MONO_IS_STORE_MEMBASE (ins)) {
13732 tmp_reg = ins->dreg;
13733 ins->dreg = ins->sreg2;
13734 ins->sreg2 = tmp_reg;
13737 spec2 [MONO_INST_DEST] = ' ';
13738 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13739 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13740 spec2 [MONO_INST_SRC3] = ' ';
13742 } else if (MONO_IS_STORE_MEMINDEX (ins))
13743 g_assert_not_reached ();
13748 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13749 printf ("\t %.3s %d", spec, ins->dreg);
13750 num_sregs = mono_inst_get_src_registers (ins, sregs);
13751 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13752 printf (" %d", sregs [srcindex]);
13759 regtype = spec [MONO_INST_DEST];
13760 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13763 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13764 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13765 MonoInst *store_ins;
13767 MonoInst *def_ins = ins;
13768 int dreg = ins->dreg; /* The original vreg */
13770 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13772 if (var->opcode == OP_REGVAR) {
13773 ins->dreg = var->dreg;
13774 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13776 * Instead of emitting a load+store, use a _membase opcode.
13778 g_assert (var->opcode == OP_REGOFFSET);
13779 if (ins->opcode == OP_MOVE) {
13783 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13784 ins->inst_basereg = var->inst_basereg;
13785 ins->inst_offset = var->inst_offset;
13788 spec = INS_INFO (ins->opcode);
13792 g_assert (var->opcode == OP_REGOFFSET);
13794 prev_dreg = ins->dreg;
13796 /* Invalidate any previous lvreg for this vreg */
13797 vreg_to_lvreg [ins->dreg] = 0;
13801 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13803 store_opcode = OP_STOREI8_MEMBASE_REG;
13806 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13808 #if SIZEOF_REGISTER != 8
13809 if (regtype == 'l') {
13810 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13811 mono_bblock_insert_after_ins (bb, ins, store_ins);
13812 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13813 mono_bblock_insert_after_ins (bb, ins, store_ins);
13814 def_ins = store_ins;
13819 g_assert (store_opcode != OP_STOREV_MEMBASE);
13821 /* Try to fuse the store into the instruction itself */
13822 /* FIXME: Add more instructions */
13823 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13824 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13825 ins->inst_imm = ins->inst_c0;
13826 ins->inst_destbasereg = var->inst_basereg;
13827 ins->inst_offset = var->inst_offset;
13828 spec = INS_INFO (ins->opcode);
13829 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13830 ins->opcode = store_opcode;
13831 ins->inst_destbasereg = var->inst_basereg;
13832 ins->inst_offset = var->inst_offset;
13836 tmp_reg = ins->dreg;
13837 ins->dreg = ins->sreg2;
13838 ins->sreg2 = tmp_reg;
13841 spec2 [MONO_INST_DEST] = ' ';
13842 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13843 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13844 spec2 [MONO_INST_SRC3] = ' ';
13846 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13847 // FIXME: The backends expect the base reg to be in inst_basereg
13848 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13850 ins->inst_basereg = var->inst_basereg;
13851 ins->inst_offset = var->inst_offset;
13852 spec = INS_INFO (ins->opcode);
13854 /* printf ("INS: "); mono_print_ins (ins); */
13855 /* Create a store instruction */
13856 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13858 /* Insert it after the instruction */
13859 mono_bblock_insert_after_ins (bb, ins, store_ins);
13861 def_ins = store_ins;
13864 * We can't assign ins->dreg to var->dreg here, since the
13865 * sregs could use it. So set a flag, and do it after
13868 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13869 dest_has_lvreg = TRUE;
13874 if (def_ins && !live_range_start [dreg]) {
13875 live_range_start [dreg] = def_ins;
13876 live_range_start_bb [dreg] = bb;
13879 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13882 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13883 tmp->inst_c1 = dreg;
13884 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13891 num_sregs = mono_inst_get_src_registers (ins, sregs);
13892 for (srcindex = 0; srcindex < 3; ++srcindex) {
13893 regtype = spec [MONO_INST_SRC1 + srcindex];
13894 sreg = sregs [srcindex];
13896 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13897 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13898 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13899 MonoInst *use_ins = ins;
13900 MonoInst *load_ins;
13901 guint32 load_opcode;
13903 if (var->opcode == OP_REGVAR) {
13904 sregs [srcindex] = var->dreg;
13905 //mono_inst_set_src_registers (ins, sregs);
13906 live_range_end [sreg] = use_ins;
13907 live_range_end_bb [sreg] = bb;
13909 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13912 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13913 /* var->dreg is a hreg */
13914 tmp->inst_c1 = sreg;
13915 mono_bblock_insert_after_ins (bb, ins, tmp);
13921 g_assert (var->opcode == OP_REGOFFSET);
13923 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13925 g_assert (load_opcode != OP_LOADV_MEMBASE);
13927 if (vreg_to_lvreg [sreg]) {
13928 g_assert (vreg_to_lvreg [sreg] != -1);
13930 /* The variable is already loaded to an lvreg */
13931 if (G_UNLIKELY (cfg->verbose_level > 2))
13932 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13933 sregs [srcindex] = vreg_to_lvreg [sreg];
13934 //mono_inst_set_src_registers (ins, sregs);
13938 /* Try to fuse the load into the instruction */
13939 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13940 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13941 sregs [0] = var->inst_basereg;
13942 //mono_inst_set_src_registers (ins, sregs);
13943 ins->inst_offset = var->inst_offset;
13944 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13945 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13946 sregs [1] = var->inst_basereg;
13947 //mono_inst_set_src_registers (ins, sregs);
13948 ins->inst_offset = var->inst_offset;
13950 if (MONO_IS_REAL_MOVE (ins)) {
13951 ins->opcode = OP_NOP;
13954 //printf ("%d ", srcindex); mono_print_ins (ins);
13956 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13958 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13959 if (var->dreg == prev_dreg) {
13961 * sreg refers to the value loaded by the load
13962 * emitted below, but we need to use ins->dreg
13963 * since it refers to the store emitted earlier.
13967 g_assert (sreg != -1);
13968 vreg_to_lvreg [var->dreg] = sreg;
13969 g_assert (lvregs_len < 1024);
13970 lvregs [lvregs_len ++] = var->dreg;
13974 sregs [srcindex] = sreg;
13975 //mono_inst_set_src_registers (ins, sregs);
13977 #if SIZEOF_REGISTER != 8
13978 if (regtype == 'l') {
13979 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13980 mono_bblock_insert_before_ins (bb, ins, load_ins);
13981 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13982 mono_bblock_insert_before_ins (bb, ins, load_ins);
13983 use_ins = load_ins;
13988 #if SIZEOF_REGISTER == 4
13989 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13991 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13992 mono_bblock_insert_before_ins (bb, ins, load_ins);
13993 use_ins = load_ins;
13997 if (var->dreg < orig_next_vreg) {
13998 live_range_end [var->dreg] = use_ins;
13999 live_range_end_bb [var->dreg] = bb;
14002 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14005 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14006 tmp->inst_c1 = var->dreg;
14007 mono_bblock_insert_after_ins (bb, ins, tmp);
14011 mono_inst_set_src_registers (ins, sregs);
14013 if (dest_has_lvreg) {
14014 g_assert (ins->dreg != -1);
14015 vreg_to_lvreg [prev_dreg] = ins->dreg;
14016 g_assert (lvregs_len < 1024);
14017 lvregs [lvregs_len ++] = prev_dreg;
14018 dest_has_lvreg = FALSE;
14022 tmp_reg = ins->dreg;
14023 ins->dreg = ins->sreg2;
14024 ins->sreg2 = tmp_reg;
14027 if (MONO_IS_CALL (ins)) {
14028 /* Clear vreg_to_lvreg array */
14029 for (i = 0; i < lvregs_len; i++)
14030 vreg_to_lvreg [lvregs [i]] = 0;
14032 } else if (ins->opcode == OP_NOP) {
14034 MONO_INST_NULLIFY_SREGS (ins);
14037 if (cfg->verbose_level > 2)
14038 mono_print_ins_index (1, ins);
14041 /* Extend the live range based on the liveness info */
14042 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14043 for (i = 0; i < cfg->num_varinfo; i ++) {
14044 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14046 if (vreg_is_volatile (cfg, vi->vreg))
14047 /* The liveness info is incomplete */
14050 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14051 /* Live from at least the first ins of this bb */
14052 live_range_start [vi->vreg] = bb->code;
14053 live_range_start_bb [vi->vreg] = bb;
14056 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14057 /* Live at least until the last ins of this bb */
14058 live_range_end [vi->vreg] = bb->last_ins;
14059 live_range_end_bb [vi->vreg] = bb;
14065 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14067 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14068 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14070 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14071 for (i = 0; i < cfg->num_varinfo; ++i) {
14072 int vreg = MONO_VARINFO (cfg, i)->vreg;
14075 if (live_range_start [vreg]) {
14076 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14078 ins->inst_c1 = vreg;
14079 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14081 if (live_range_end [vreg]) {
14082 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14084 ins->inst_c1 = vreg;
14085 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14086 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14088 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14094 if (cfg->gsharedvt_locals_var_ins) {
14095 /* Nullify if unused */
14096 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14097 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14100 g_free (live_range_start);
14101 g_free (live_range_end);
14102 g_free (live_range_start_bb);
14103 g_free (live_range_end_bb);
14108 * - use 'iadd' instead of 'int_add'
14109 * - handling ovf opcodes: decompose in method_to_ir.
14110 * - unify iregs/fregs
14111 * -> partly done, the missing parts are:
14112 * - a more complete unification would involve unifying the hregs as well, so
14113 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14114 * would no longer map to the machine hregs, so the code generators would need to
14115 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14116 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14117 * fp/non-fp branches speeds it up by about 15%.
14118 * - use sext/zext opcodes instead of shifts
14120 * - get rid of TEMPLOADs if possible and use vregs instead
14121 * - clean up usage of OP_P/OP_ opcodes
14122 * - cleanup usage of DUMMY_USE
14123 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14125 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14126 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14127 * - make sure handle_stack_args () is called before the branch is emitted
14128 * - when the new IR is done, get rid of all unused stuff
14129 * - COMPARE/BEQ as separate instructions or unify them ?
14130 * - keeping them separate allows specialized compare instructions like
14131 * compare_imm, compare_membase
14132 * - most back ends unify fp compare+branch, fp compare+ceq
14133 * - integrate mono_save_args into inline_method
14134  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14135 * - handle long shift opts on 32 bit platforms somehow: they require
14136 * 3 sregs (2 for arg1 and 1 for arg2)
14137 * - make byref a 'normal' type.
14138 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14139 * variable if needed.
14140 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14141 * like inline_method.
14142 * - remove inlining restrictions
14143 * - fix LNEG and enable cfold of INEG
14144 * - generalize x86 optimizations like ldelema as a peephole optimization
14145 * - add store_mem_imm for amd64
14146 * - optimize the loading of the interruption flag in the managed->native wrappers
14147 * - avoid special handling of OP_NOP in passes
14148 * - move code inserting instructions into one function/macro.
14149 * - try a coalescing phase after liveness analysis
14150 * - add float -> vreg conversion + local optimizations on !x86
14151 * - figure out how to handle decomposed branches during optimizations, ie.
14152 * compare+branch, op_jump_table+op_br etc.
14153 * - promote RuntimeXHandles to vregs
14154 * - vtype cleanups:
14155 * - add a NEW_VARLOADA_VREG macro
14156 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14157 * accessing vtype fields.
14158 * - get rid of I8CONST on 64 bit platforms
14159 * - dealing with the increase in code size due to branches created during opcode
14161 * - use extended basic blocks
14162 * - all parts of the JIT
14163 * - handle_global_vregs () && local regalloc
14164 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14165 * - sources of increase in code size:
14168 * - isinst and castclass
14169 * - lvregs not allocated to global registers even if used multiple times
14170 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14172 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14173 * - add all micro optimizations from the old JIT
14174 * - put tree optimizations into the deadce pass
14175 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14176 * specific function.
14177 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14178 * fcompare + branchCC.
14179 * - create a helper function for allocating a stack slot, taking into account
14180 * MONO_CFG_HAS_SPILLUP.
14182 * - merge the ia64 switch changes.
14183 * - optimize mono_regstate2_alloc_int/float.
14184 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14185 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14186 * parts of the tree could be separated by other instructions, killing the tree
14187 * arguments, or stores killing loads etc. Also, should we fold loads into other
14188 * instructions if the result of the load is used multiple times ?
14189 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14190 * - LAST MERGE: 108395.
14191 * - when returning vtypes in registers, generate IR and append it to the end of the
14192 * last bb instead of doing it in the epilog.
14193 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14201 - When to decompose opcodes:
14202 - earlier: this makes some optimizations hard to implement, since the low level IR
14203      no longer contains the necessary information. But it is easier to do.
14204 - later: harder to implement, enables more optimizations.
14205 - Branches inside bblocks:
14206 - created when decomposing complex opcodes.
14207 - branches to another bblock: harmless, but not tracked by the branch
14208 optimizations, so need to branch to a label at the start of the bblock.
14209 - branches to inside the same bblock: very problematic, trips up the local
14210      reg allocator. Can be fixed by splitting the current bblock, but that is a
14211 complex operation, since some local vregs can become global vregs etc.
14212 - Local/global vregs:
14213 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14214 local register allocator.
14215 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14216 structure, created by mono_create_var (). Assigned to hregs or the stack by
14217 the global register allocator.
14218 - When to do optimizations like alu->alu_imm:
14219 - earlier -> saves work later on since the IR will be smaller/simpler
14220 - later -> can work on more instructions
14221 - Handling of valuetypes:
14222 - When a vtype is pushed on the stack, a new temporary is created, an
14223 instruction computing its address (LDADDR) is emitted and pushed on
14224 the stack. Need to optimize cases when the vtype is used immediately as in
14225 argument passing, stloc etc.
14226 - Instead of the to_end stuff in the old JIT, simply call the function handling
14227 the values on the stack before emitting the last instruction of the bb.
14230 #endif /* DISABLE_JIT */