2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Inliner cost heuristics: a conditional branch costs BRANCH_COST units,
 * and methods whose IL is longer than INLINE_LENGTH_LIMIT are not inlined. */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
/* These have 'cfg' as an implicit argument */
/* NOTE(review): this extract dropped some closing "} while (0)" lines of the
 * macros below — restore against the full file before compiling. */
/* Record an inline failure for the current (inlined) method and unwind to
 * the exception_exit label of the enclosing IR-generation function. */
#define INLINE_FAILURE(msg) do { \
if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
inline_failure (cfg, msg); \
goto exception_exit; \
/* Bail out if a previous helper already recorded an exception on CFG. */
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
/* Record a method-access violation (cmethod not visible from method) and bail. */
#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
method_access_failure ((cfg), (method), (cmethod)); \
goto exception_exit; \
/* Record a field-access violation (field not visible from method) and bail. */
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
/* Generic sharing cannot handle OPCODE here: mark the compile as failed so the
 * caller falls back to compiling a concrete instantiation. */
#define GENERIC_SHARING_FAILURE(opcode) do { \
gshared_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
/* Same as above, but only when compiling in gsharedvt (valuetype sharing) mode. */
#define GSHAREDVT_FAILURE(opcode) do { \
if (cfg->gsharedvt) { \
gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
/* Flag the compile as out-of-memory and bail. */
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
goto exception_exit; \
/* Disable AOT for this method; logs the call site at verbosity >= 2. */
#define DISABLE_AOT(cfg) do { \
if ((cfg)->verbose_level >= 2) \
printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
(cfg)->disable_aot = TRUE; \
/* Record a type-load failure; break_on_unverified () lets a debugger trap it. */
#define LOAD_ERROR do { \
break_on_unverified (); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
goto exception_exit; \
/* Like LOAD_ERROR, but also remembers which class failed to load. */
#define TYPE_LOAD_ERROR(klass) do { \
cfg->exception_ptr = klass; \
/* Convert a pending MonoError on CFG into a compile exception and bail. */
#define CHECK_CFG_ERROR do {\
if (!mono_error_ok (&cfg->error)) { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
goto mono_error_exit; \
/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
/* helper methods signatures */
/* Cached icall signatures for the various runtime trampolines; populated once
 * by mono_create_helper_signatures () below. */
static MonoMethodSignature *helper_sig_class_init_trampoline;
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
/*
 * Instruction metadata
 *
 * The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * "mini-ops.h" to extract a different column of the opcode table.
 * First expansion: dest/src1/src2 register-class characters.
 */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
/* keep in sync with the enum in mini.h */
#include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 ins_sreg_counts[] = {
#include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness info to "not yet used". */
#define MONO_INIT_VARINFO(vi,id) do { \
(vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers over the per-compile virtual-register allocators
 * (alloc_ireg/lreg/freg/preg/dreg from mini.h).
 * NOTE(review): the return-type lines and braces of these one-line wrappers
 * were dropped by this extract — confirm against the full file.
 */
/* Allocate an integer vreg. */
mono_alloc_ireg (MonoCompile *cfg)
return alloc_ireg (cfg);
/* Allocate a long (64-bit) vreg. */
mono_alloc_lreg (MonoCompile *cfg)
return alloc_lreg (cfg);
/* Allocate a floating-point vreg. */
mono_alloc_freg (MonoCompile *cfg)
return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
mono_alloc_preg (MonoCompile *cfg)
return alloc_preg (cfg);
/* Allocate a destination vreg matching the given eval-stack type. */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
return alloc_dreg (cfg, stack_type);
/*
 * mono_alloc_ireg_ref:
 *
 * Allocate an IREG, and mark it as holding a GC ref.
 */
mono_alloc_ireg_ref (MonoCompile *cfg)
return alloc_ireg_ref (cfg);
/*
 * mono_alloc_ireg_mp:
 *
 * Allocate an IREG, and mark it as holding a managed pointer.
 */
mono_alloc_ireg_mp (MonoCompile *cfg)
return alloc_ireg_mp (cfg);
/*
 * mono_alloc_ireg_copy:
 *
 * Allocate an IREG with the same GC type as VREG.
 */
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
if (vreg_is_ref (cfg, vreg))
return alloc_ireg_ref (cfg);
else if (vreg_is_mp (cfg, vreg))
return alloc_ireg_mp (cfg);
return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the register-move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) suited to
 * values of TYPE. Enums recurse on their base type; generic instances recurse
 * on the container class. NOTE(review): many case labels and return lines of
 * this switch are missing from this extract.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
type = mini_get_underlying_type (cfg, type);
switch (type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
/* r4fp mode keeps float32 in its own register class (OP_RMOVE). */
return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
/* enums move like their underlying integral type */
type = mono_class_enum_basetype (type->data.klass);
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
g_assert (cfg->generic_sharing_context);
if (mini_type_var_is_vt (cfg, type))
return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print BB's in/out edges (block numbers with depth-first
 * numbers) followed by every instruction in the block, prefixed by MSG.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
printf ("\n%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
for (i = 0; i < bb->out_count; ++i)
printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Populate the helper_sig_* signature cache above from icall signature
 * strings ("<ret> <args...>"). Called once at JIT startup.
 */
mono_create_helper_signatures (void)
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/* Debugger hook: trap here when unverified IL is hit, if the
 * break_on_unverified debug option is enabled. */
static MONO_NEVER_INLINE void
break_on_unverified (void)
if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *
 *   Mark the compile as failed with MONO_EXCEPTION_METHOD_ACCESS and build a
 * human-readable message naming the inaccessible callee and the caller.
 * cfg->exception_message takes ownership of the formatted string.
 */
static MONO_NEVER_INLINE void
method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
char *method_fname = mono_method_full_name (method, TRUE);
char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
g_free (method_fname);
g_free (cil_method_fname);
/*
 * field_access_failure:
 *
 *   Mark the compile as failed with MONO_EXCEPTION_FIELD_ACCESS and build a
 * message naming the inaccessible field and the offending method.
 */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
char *method_fname = mono_method_full_name (method, TRUE);
char *field_fname = mono_field_full_name (field);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
g_free (method_fname);
g_free (field_fname);
/*
 * inline_failure:
 *
 *   Mark the compile with MONO_EXCEPTION_INLINE_FAILED so the caller aborts
 * inlining of the current method; logs MSG at verbosity >= 2.
 */
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
406 static MONO_NEVER_INLINE void
407 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
409 if (cfg->verbose_level > 2) \
410 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
411 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Like gshared_failure (), but for gsharedvt (valuetype-sharing) mode:
 * stores a detailed message (including FILE:LINE) in cfg->exception_message
 * and marks the compile with MONO_EXCEPTION_GENERIC_SHARING_FAILED.
 */
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
if (cfg->verbose_level >= 2)
printf ("%s\n", cfg->exception_message);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instantiations might be verifiable, and some might not be, i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 *
 * So in gsharedvt mode an unverifiable method is not an error: we fall back
 * to compiling the concrete instantiation instead.
 */
#define UNVERIFIED do { \
if (cfg->gsharedvt) { \
if (cfg->verbose_level > 2) \
printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
goto exception_exit; \
break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * address IP; flags out-of-range IPs as unverified. */
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
NEW_BBLOCK (cfg, (tblock)); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm; dest is a managed
 * pointer vreg. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
(dest)->backend.shift_amount = (shift); \
MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* Emit conversions so both operands of a binary opcode are of the same type */
/* NOTE(review): portions of this function (braces, some statements) are
 * missing from this extract. The visible logic widens R4->R8 when mixing
 * float sizes, and sign-extends an I4 operand to pointer width on 64-bit. */
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
MonoInst *arg1 = *arg1_ref;
MonoInst *arg2 = *arg2_ref;
((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
(arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
/* Mixing r4/r8 is allowed by the spec */
if (arg1->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
conv->type = STACK_R8;
if (arg2->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
conv->type = STACK_R8;
#if SIZEOF_REGISTER == 8
/* FIXME: Need to add many more cases */
if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
int dr = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
(ins)->sreg2 = widen->dreg;
/* Pop two stack values, emit a typed binary op (widening operands if needed),
 * decompose it, and push the result. Uses the implicit locals sp/ins/bblock. */
#define ADD_BINOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
ins->sreg1 = sp [0]->dreg; \
ins->sreg2 = sp [1]->dreg; \
type_from_op (cfg, ins, sp [0], sp [1]); \
/* Have to insert a widening op */ \
add_widen_op (cfg, ins, &sp [0], &sp [1]); \
ins->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
/* Pop one stack value, emit a typed unary op, decompose, and push the result. */
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
/* Emit a compare + conditional branch: links the current bblock to both the
 * branch target and the fall-through block (next_block, or the block at ip),
 * flushing the eval stack if it is non-empty at the branch. */
#define ADD_BINCOND(next_block) do { \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
type_from_op (cfg, cmp, sp [0], sp [1]); \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, bblock, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
link_bblock (cfg, bblock, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
GET_BBLOCK (cfg, tblock, ip); \
link_bblock (cfg, bblock, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
if (sp != stack_start) { \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
MONO_ADD_INS (bblock, cmp); \
MONO_ADD_INS (bblock, ins); \
/*
 * link_bblock: Links two basic blocks
 *
 * links two basic blocks in the control flow graph, the 'from'
 * argument is the starting block and the 'to' argument is the block
 * the control flow ends to after 'from'.
 *
 * The edge is added to both from->out_bb and to->in_bb; duplicate edges are
 * detected and skipped. Edge arrays are grown by reallocating from the
 * compile mempool (old arrays are intentionally leaked into the mempool).
 * NOTE(review): debug-output format mixes "IL%04x" and "IL_%04x" — looks like
 * an upstream inconsistency; verify before "fixing".
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
MonoBasicBlock **newa;
if (from->cil_code) {
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
printf ("edge from entry to exit\n");
/* already linked? then nothing to do */
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
/* Public wrapper for link_bblock (). */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 * We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 *
 * Returns:
 * A region token that encodes where this region is, and information
 * about the clause owner for this block.
 *
 * The region encodes the try/catch/filter clause that owns this block
 * as well as the type. -1 is a special value that represents a block
 * that is in none of try/catch/filter.
 */
mono_find_block_region (MonoCompile *cfg, int offset)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
/* first pass: handler (filter/finally/fault/catch) regions take priority */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
(offset < (clause->handler_offset)))
return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* second pass: plain try-block membership */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the exception clauses of kind TYPE whose protected range contains
 * IP but not TARGET, i.e. the handlers that must run when control leaves the
 * clause via a branch from IP to TARGET. Returns them as a GList.
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
if (clause->flags == type)
res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer spill variable for REGION, creating and caching
 * it in cfg->spvars on first use. The variable is marked volatile so the
 * register allocator leaves it on the stack.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the cached exception-object variable for the handler at OFFSET
 * (NULL if none was created yet). */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception-object variable for the handler at OFFSET, creating
 * and caching it in cfg->exvars on first use; marked volatile so it stays on
 * the stack.
 */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 *
 * Sets inst->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/...) and inst->klass from
 * TYPE. Enums and generic instances recurse on the underlying type.
 * NOTE(review): several case labels/break lines are missing from this extract.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
type = mini_get_underlying_type (cfg, type);
inst->klass = klass = mono_class_from_mono_type (type);
inst->type = STACK_MP;
switch (type->type) {
inst->type = STACK_INV;
inst->type = STACK_I4;
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
inst->type = STACK_I8;
/* float32 stack type depends on whether r4fp mode is on */
inst->type = cfg->r4_stack_type;
inst->type = STACK_R8;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
type = mono_class_enum_basetype (type->data.klass);
inst->type = STACK_VTYPE;
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
g_assert (cfg->generic_sharing_context);
if (mini_is_gsharedvt_type (cfg, type)) {
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 *
 * Each table is indexed by the STACK_* type(s) of the operand(s); STACK_INV
 * marks an illegal combination. Rows/columns follow the STACK_* enum order
 * (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4); shorter rows rely on
 * zero/STACK_INV fill for the trailing R4 column.
 */
/* result type of a numeric binop, by (lhs, rhs) stack types */
bin_num_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* result type of unary negation, by operand stack type */
STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
/* reduce the size of this table */
/* result type of an integer-only binop (and/or/xor/...) */
bin_int_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* comparison legality: 0 = invalid, non-zero = allowed (values 2/3/4 encode
 * special cases such as ptr/mp and reference comparisons) */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/* Inv i L p F & O vt r4 */
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
/* reduce the size of this table */
/* result type of shl/shr/shr.un, by (value, shift-amount) stack types */
shift_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.
 *
 * Entries are opcode *deltas*: add table[stack_type] to the generic CEE_/OP_
 * opcode to obtain the typed IR opcode.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 *
 * Also rewrites ins->opcode from the generic CIL opcode to the typed IR
 * opcode using the *_op_map delta tables above.
 * NOTE(review): many case labels and break statements of this switch are
 * missing from this extract.
 */
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
switch (ins->opcode) {
/* numeric binops: add/sub/mul/div/rem */
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
/* integer-only binops: and/or/xor */
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
/* shifts: result type depends on the shifted value */
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
/* compares: pick L/R/F/I variant from the operand type */
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R4)
ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
ins->opcode = OP_ICOMPARE;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches */
ins->opcode += beqops_op_map [src1->type];
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
/* unsigned compares: only combinations marked 1 in the table are legal */
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
/* neg */
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
/* not: only integral operands are legal */
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
/* conv to small ints */
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
/* conv.r.un */
ins->type = STACK_R8;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_R_UN;
ins->opcode = OP_LCONV_TO_R_UN;
case CEE_CONV_OVF_I1:
case CEE_CONV_OVF_U1:
case CEE_CONV_OVF_I2:
case CEE_CONV_OVF_U2:
case CEE_CONV_OVF_I4:
case CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_I_UN:
case CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
case CEE_CONV_OVF_I1_UN:
case CEE_CONV_OVF_I2_UN:
case CEE_CONV_OVF_I4_UN:
case CEE_CONV_OVF_U1_UN:
case CEE_CONV_OVF_U2_UN:
case CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-width unsigned conversion */
ins->type = STACK_PTR;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_MOVE;
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit */
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
case CEE_CONV_OVF_I8:
case CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_U8_UN:
case CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 */
ins->type = cfg->r4_stack_type;
ins->opcode += unops_op_map [src1->type];
/* conv.r8 */
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
case CEE_CONV_OVF_I:
case CEE_CONV_OVF_U:
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
/* overflow-checked arithmetic: not defined for floats */
case CEE_ADD_OVF_UN:
case CEE_MUL_OVF_UN:
case CEE_SUB_OVF_UN:
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
if (ins->type == STACK_R8)
ins->type = STACK_INV;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
case OP_LOADR4_MEMBASE:
ins->type = cfg->r4_stack_type;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers on the stack default to object class */
if (ins->type == STACK_MP)
ins->klass = mono_defaults.object_class;
/* ldind_type fragment: eval-stack type produced by each ldind.* variant.
 * NOTE(review): the array declaration line is missing from this extract. */
STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Verify that the constant/argument MonoInsts in ARGS are compatible with
 * the parameter types of SIG (used when evaluating calls at compile time).
 * NOTE(review): heavily truncated here — return statements and several cases
 * are missing.
 */
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
switch (args->type) {
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
if (!sig->params [i]->byref)
if (sig->params [i]->byref)
switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (sig->params [i]->byref)
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
/*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
/* lazily created; cached on the cfg */
if (!cfg->domainvar)
cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->domainvar;
/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling. Lazily created and cached on the cfg; only meaningful on
 * architectures that define MONO_ARCH_NEED_GOT_VAR and when compiling AOT.
 */
mono_get_got_var (MonoCompile *cfg)
#ifdef MONO_ARCH_NEED_GOT_VAR
if (!cfg->compile_aot)
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Return the variable holding the runtime generic context (rgctx) for a
 * generic-shared method, creating it lazily. Marked volatile to force stack
 * allocation. Only valid when generic sharing is active.
 */
mono_get_vtable_var (MonoCompile *cfg)
g_assert (cfg->generic_sharing_context);
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map an eval-stack type (ins->type) back to a representative MonoType,
 * using ins->klass for object/vtype/mp entries.
 */
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
case STACK_I4: return &mono_defaults.int32_class->byval_arg;
case STACK_I8: return &mono_defaults.int64_class->byval_arg;
case STACK_PTR: return &mono_defaults.int_class->byval_arg;
case STACK_R4: return &mono_defaults.single_class->byval_arg;
case STACK_R8: return &mono_defaults.double_class->byval_arg;
return &ins->klass->this_arg;
case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
case STACK_VTYPE: return &ins->klass->byval_arg;
g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type (): classify a MonoType into the
 * evaluation-stack type (STACK_*) used by the JIT IR.
 */
1285 static G_GNUC_UNUSED int
1286 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* strip enum wrappers etc. before classifying */
1288 t = mono_type_get_underlying_type (t);
1300 case MONO_TYPE_FNPTR:
1302 case MONO_TYPE_CLASS:
1303 case MONO_TYPE_STRING:
1304 case MONO_TYPE_OBJECT:
1305 case MONO_TYPE_SZARRAY:
1306 case MONO_TYPE_ARRAY:
/* R4 handling is configurable (separate STACK_R4 or folded into R8) */
1312 return cfg->r4_stack_type;
1315 case MONO_TYPE_VALUETYPE:
1316 case MONO_TYPE_TYPEDBYREF:
1318 case MONO_TYPE_GENERICINST:
/* generic instances over valuetypes are classified as valuetypes */
1319 if (mono_type_generic_inst_is_valuetype (t))
1325 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element class accessed by the given CIL ldelem/stelem
 * opcode variant.
 */
1332 array_access_to_klass (int opcode)
1336 return mono_defaults.byte_class;
1338 return mono_defaults.uint16_class;
1341 return mono_defaults.int_class;
1344 return mono_defaults.sbyte_class;
1347 return mono_defaults.int16_class;
1350 return mono_defaults.int32_class;
1352 return mono_defaults.uint32_class;
1355 return mono_defaults.int64_class;
1358 return mono_defaults.single_class;
1361 return mono_defaults.double_class;
1362 case CEE_LDELEM_REF:
1363 case CEE_STELEM_REF:
1364 return mono_defaults.object_class;
1366 g_assert_not_reached ();
1372 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable to carry the stack value INS at stack slot
 * SLOT across basic-block boundaries, reusing a cached variable of the
 * same stack type and slot when possible (cfg->intvars).
 */
1375 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1380 /* inlining can result in deeper stacks */
1381 if (slot >= cfg->header->max_stack)
1382 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* one cache entry per (stack type, slot) pair */
1384 pos = ins->type - 1 + slot * STACK_MAX;
1386 switch (ins->type) {
1393 if ((vnum = cfg->intvars [pos]))
1394 return cfg->varinfo [vnum];
1395 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1396 cfg->intvars [pos] = res->inst_c0;
/* non-cacheable stack types always get a fresh variable */
1399 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, record the IMAGE/TOKEN pair for KEY in
 * cfg->token_info_hash so the AOT compiler can later look the item up
 * from just image + token.
 */
1405 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1408 * Don't use this if a generic_context is set, since that means AOT can't
1409 * look up the method using just the image+token.
1410 * table == 0 means this is a reference made from a wrapper.
1412 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1413 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1414 jump_info_token->image = image;
1415 jump_info_token->token = token;
1416 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1421 * This function is called to handle items that are left on the evaluation stack
1422 * at basic block boundaries. What happens is that we save the values to local variables
1423 * and we reload them later when first entering the target basic block (with the
1424 * handle_loaded_temps () function).
1425 * A single join point will use the same variables (stored in the array bb->out_stack or
1426 * bb->in_stack, if the basic block is before or after the join point).
1428 * This function needs to be called _before_ emitting the last instruction of
1429 * the bb (i.e. before emitting a branch).
1430 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spill the COUNT evaluation-stack values in SP to local variables at a
 * basic-block boundary so successor blocks can reload them (see the
 * comment above).  Sets cfg->unverifiable when the stack depths of the
 * joining blocks disagree.
 */
1433 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1436 MonoBasicBlock *bb = cfg->cbb;
1437 MonoBasicBlock *outb;
1438 MonoInst *inst, **locals;
1443 if (cfg->verbose_level > 3)
1444 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first exit from this bblock: choose/create the out_stack variables */
1445 if (!bb->out_scount) {
1446 bb->out_scount = count;
1447 //printf ("bblock %d has out:", bb->block_num);
1449 for (i = 0; i < bb->out_count; ++i) {
1450 outb = bb->out_bb [i];
1451 /* exception handlers are linked, but they should not be considered for stack args */
1452 if (outb->flags & BB_EXCEPTION_HANDLER)
1454 //printf (" %d", outb->block_num);
/* a successor already has an in_stack: share its variables */
1455 if (outb->in_stack) {
1457 bb->out_stack = outb->in_stack;
1463 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1464 for (i = 0; i < count; ++i) {
1466 * try to reuse temps already allocated for this purpose, if they occupy the same
1467 * stack slot and if they are of the same type.
1468 * This won't cause conflicts since if 'local' is used to
1469 * store one of the values in the in_stack of a bblock, then
1470 * the same variable will be used for the same outgoing stack
1472 * This doesn't work when inlining methods, since the bblocks
1473 * in the inlined methods do not inherit their in_stack from
1474 * the bblock they are inlined to. See bug #58863 for an
1477 if (cfg->inlined_method)
1478 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1480 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate this bblock's out_stack to successors lacking an in_stack */
1485 for (i = 0; i < bb->out_count; ++i) {
1486 outb = bb->out_bb [i];
1487 /* exception handlers are linked, but they should not be considered for stack args */
1488 if (outb->flags & BB_EXCEPTION_HANDLER)
1490 if (outb->in_scount) {
1491 if (outb->in_scount != bb->out_scount) {
1492 cfg->unverifiable = TRUE;
1495 continue; /* check they are the same locals */
1497 outb->in_scount = count;
1498 outb->in_stack = bb->out_stack;
1501 locals = bb->out_stack;
/* store the current stack values into the chosen locals */
1503 for (i = 0; i < count; ++i) {
1504 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1505 inst->cil_code = sp [i]->cil_code;
1506 sp [i] = locals [i];
1507 if (cfg->verbose_level > 3)
1508 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1512 * It is possible that the out bblocks already have in_stack assigned, and
1513 * the in_stacks differ. In this case, we will store to all the different
1520 /* Find a bblock which has a different in_stack */
1522 while (bindex < bb->out_count) {
1523 outb = bb->out_bb [bindex];
1524 /* exception handlers are linked, but they should not be considered for stack args */
1525 if (outb->flags & BB_EXCEPTION_HANDLER) {
1529 if (outb->in_stack != locals) {
1530 for (i = 0; i < count; ++i) {
1531 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1532 inst->cil_code = sp [i]->cil_code;
1533 sp [i] = locals [i];
1534 if (cfg->verbose_level > 3)
1535 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1537 locals = outb->in_stack;
1546 /* Emit code which loads interface_offsets [klass->interface_id]
1547 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR loading interface_offsets [klass->interface_id] into INTF_REG;
 * the offsets array lives in memory just before the vtable (see the
 * comment above), hence the negative offset in the JIT case.
 */
1550 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1552 if (cfg->compile_aot) {
1553 int ioffset_reg = alloc_preg (cfg);
1554 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID is added directly to the vtable pointer, so it presumably
 * resolves to the (negative) byte offset of the entry — mirrors the
 * -((interface_id + 1) * SIZEOF_VOID_P) used in the non-AOT branch */
1556 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR setting INTF_BIT_REG to nonzero iff the interface bitmap at
 * BASE_REG+OFFSET has the bit for KLASS's interface id set.  Handles
 * three variants: compressed bitmap (out-of-line icall), AOT (iid known
 * only at load time) and plain JIT (iid is a compile-time constant).
 */
1566 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1568 int ibitmap_reg = alloc_preg (cfg);
1569 #ifdef COMPRESSED_INTERFACE_BITMAP
1571 MonoInst *res, *ins;
1572 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1573 MONO_ADD_INS (cfg->cbb, ins);
1575 if (cfg->compile_aot)
1576 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1578 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* out-of-line match against the compressed bitmap */
1579 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1580 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1582 int ibitmap_byte_reg = alloc_preg (cfg);
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1586 if (cfg->compile_aot) {
/* iid unknown until load time: compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) in emitted IR */
1587 int iid_reg = alloc_preg (cfg);
1588 int shifted_iid_reg = alloc_preg (cfg);
1589 int ibitmap_byte_address_reg = alloc_preg (cfg);
1590 int masked_iid_reg = alloc_preg (cfg);
1591 int iid_one_bit_reg = alloc_preg (cfg);
1592 int iid_bit_reg = alloc_preg (cfg);
1593 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1595 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1598 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1600 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* iid known at compile time: fold byte index and bit mask into immediates */
1602 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1609 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1610 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against a MonoClass* held in klass_reg (see comment above) */
1613 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1615 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1619 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1620 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against a MonoVTable* held in vtable_reg (see comment above) */
1623 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1625 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1629 * Emit code which checks whether the interface id of @klass is smaller
1630 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Emit IR comparing MAX_IID_REG against KLASS's interface id; when the
 * max is smaller, branch to FALSE_TARGET if given, otherwise throw
 * InvalidCastException.
 */
1633 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1634 MonoBasicBlock *false_target)
1636 if (cfg->compile_aot) {
/* iid is a load-time constant under AOT: materialize it in a register */
1637 int iid_reg = alloc_preg (cfg);
1638 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1646 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1649 /* Same as above, but obtains max_iid from a vtable */
/* Load max_interface_id from the vtable in VTABLE_REG, then do the iid check */
1651 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1652 MonoBasicBlock *false_target)
1654 int max_iid_reg = alloc_preg (cfg);
1656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1657 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1660 /* Same as above, but obtains max_iid from a klass */
/* Load max_interface_id from the MonoClass in KLASS_REG, then do the iid check */
1662 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1663 MonoBasicBlock *false_target)
1665 int max_iid_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1668 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass check: index the supertypes table of the
 * class in KLASS_REG at KLASS's idepth and compare the entry against
 * KLASS (via KLASS_INS / class const / immediate).  Branches to
 * TRUE_TARGET on match and to FALSE_TARGET when the idepth is too small.
 */
1672 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int idepth_reg = alloc_preg (cfg);
1675 int stypes_reg = alloc_preg (cfg);
1676 int stype = alloc_preg (cfg);
1678 mono_class_setup_supertypes (klass);
/* only hierarchies deeper than the default supertable need a runtime
 * idepth check before indexing the supertypes array */
1680 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1688 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1689 } else if (cfg->compile_aot) {
1690 int const_reg = alloc_preg (cfg);
1691 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check without a klass instruction */
1700 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1702 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against the vtable in VTABLE_REG:
 * max-iid range check, then interface bitmap test.  Branches to
 * TRUE_TARGET when given, otherwise throws InvalidCastException.
 */
1706 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1708 int intf_reg = alloc_preg (cfg);
1710 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1711 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1716 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1720 * Variant of the above that takes a register to the class, not the vtable.
/*
 * mini_emit_iface_class_cast:
 * Same as mini_emit_iface_cast () but KLASS_REG holds a MonoClass*
 * rather than a vtable (see comment above).
 */
1723 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1725 int intf_bit_reg = alloc_preg (cfg);
1727 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1728 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1731 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1733 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check of KLASS_REG against KLASS (via
 * KLASS_INST / class const / immediate); throws InvalidCastException
 * on mismatch.
 */
1737 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1740 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 } else if (cfg->compile_aot) {
1742 int const_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1744 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1748 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: class-equality check without a klass instruction */
1752 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1754 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (instead of throwing, unlike mini_emit_class_check ()).
 */
1758 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1760 if (cfg->compile_aot) {
1761 int const_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1763 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1771 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure.  Arrays are handled by
 * checking rank and then recursing on the element (cast) class, with
 * special cases for object/enum element types; non-arrays walk the
 * supertypes table.  OBJ_REG == -1 skips the vector (bounds) check.
 */
1774 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1777 int rank_reg = alloc_preg (cfg);
1778 int eclass_reg = alloc_preg (cfg);
1780 g_assert (!klass_inst);
/* ranks must match exactly */
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1783 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1784 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1786 if (klass->cast_class == mono_defaults.object_class) {
1787 int parent_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1789 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1790 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1791 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1792 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 } else if (klass->cast_class == mono_defaults.enum_class) {
1795 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1796 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1797 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1799 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1800 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1803 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1804 /* Check that the object is a vector too */
1805 int bounds_reg = alloc_preg (cfg);
1806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1808 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array case: supertype walk, same structure as mini_emit_isninst_cast_inst () */
1811 int idepth_reg = alloc_preg (cfg);
1812 int stypes_reg = alloc_preg (cfg);
1813 int stype = alloc_preg (cfg);
1815 mono_class_setup_supertypes (klass);
1817 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1819 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1820 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1824 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a klass instruction */
1829 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1831 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR to set SIZE bytes at DESTREG+OFFSET to VAL (only 0 is
 * supported), preferring the widest stores permitted by SIZE/ALIGN and
 * falling back to byte stores for unaligned data and remainders.
 */
1835 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1839 g_assert (val == 0);
/* small aligned region: a single immediate store of the right width */
1844 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1847 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1855 #if SIZEOF_REGISTER == 8
1857 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize the value once, then emit a store sequence */
1863 val_reg = alloc_preg (cfg);
1865 if (SIZEOF_REGISTER == 8)
1866 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1868 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1871 /* This could be optimized further if neccesary */
1873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1880 #if !NO_UNALIGNED_ACCESS
1881 if (SIZEOF_REGISTER == 8) {
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR to copy SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * using the widest load/store pairs ALIGN permits and byte copies for
 * unaligned data and remainders.
 */
1913 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1920 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1921 g_assert (size < 10000);
1924 /* This could be optimized further if neccesary */
/* unaligned prefix: copy byte by byte */
1926 cur_reg = alloc_preg (cfg);
1927 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1935 #if !NO_UNALIGNED_ACCESS
1936 if (SIZEOF_REGISTER == 8) {
/* bulk of the copy in 8-byte chunks on 64-bit targets */
1938 cur_reg = alloc_preg (cfg);
1939 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining tail: 4-, 2-, then 1-byte copies */
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1965 cur_reg = alloc_preg (cfg);
1966 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR to store SREG1 into the TLS slot TLS_KEY.  Under AOT the
 * offset is an AOT-patchable constant (OP_TLS_SET_REG); otherwise the
 * offset is resolved now via mini_get_tls_offset ().
 */
1975 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1979 if (cfg->compile_aot) {
1980 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1981 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1983 ins->sreg2 = c->dreg;
1984 MONO_ADD_INS (cfg->cbb, ins);
1986 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1988 ins->inst_offset = mini_get_tls_offset (tls_key);
1989 MONO_ADD_INS (cfg->cbb, ins);
1996 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 * Emit IR linking the method's MonoLMF (cfg->lmf_var) onto the LMF
 * stack.  The lmf_addr is obtained either from TLS directly, from
 * jit_tls, via an inlined pthread_getspecific () (AOT), or via the
 * mono_get_lmf_addr icall fallback.
 */
1999 emit_push_lmf (MonoCompile *cfg)
2002 * Emit IR to push the LMF:
2003 * lmf_addr = <lmf_addr from tls>
2004 * lmf->lmf_addr = lmf_addr
2005 * lmf->prev_lmf = *lmf_addr
2008 int lmf_reg, prev_lmf_reg;
2009 MonoInst *ins, *lmf_ins;
/* fast path: the LMF itself is reachable through TLS */
2014 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2015 /* Load current lmf */
2016 lmf_ins = mono_get_lmf_intrinsic (cfg);
2018 MONO_ADD_INS (cfg->cbb, lmf_ins);
2019 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2020 lmf_reg = ins->dreg;
2021 /* Save previous_lmf */
2022 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* make our LMF the TLS-visible top of the stack */
2024 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2027 * Store lmf_addr in a variable, so it can be allocated to a global register.
2029 if (!cfg->lmf_addr_var)
2030 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2033 ins = mono_get_jit_tls_intrinsic (cfg);
2035 int jit_tls_dreg = ins->dreg;
2037 MONO_ADD_INS (cfg->cbb, ins);
2038 lmf_reg = alloc_preg (cfg);
2039 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2041 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2044 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2046 MONO_ADD_INS (cfg->cbb, lmf_ins);
2049 MonoInst *args [16], *jit_tls_ins, *ins;
2051 /* Inline mono_get_lmf_addr () */
2052 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2054 /* Load mono_jit_tls_id */
2055 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2056 /* call pthread_getspecific () */
2057 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2058 /* lmf_addr = &jit_tls->lmf */
2059 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2062 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2066 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2068 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2069 lmf_reg = ins->dreg;
2071 prev_lmf_reg = alloc_preg (cfg);
2072 /* Save previous_lmf */
2073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2074 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = our lmf: link it at the head of the LMF stack */
2076 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2083 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 * Emit IR unlinking the method's MonoLMF from the LMF stack, restoring
 * previous_lmf either into TLS directly or through the saved lmf_addr.
 */
2086 emit_pop_lmf (MonoCompile *cfg)
2088 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2094 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2095 lmf_reg = ins->dreg;
2097 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2098 /* Load previous_lmf */
2099 prev_lmf_reg = alloc_preg (cfg);
2100 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf))
;
2102 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2105 * Emit IR to pop the LMF:
2106 * *(lmf->lmf_addr) = lmf->prev_lmf
2108 /* This could be called before emit_push_lmf () */
2109 if (!cfg->lmf_addr_var)
2110 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2111 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2113 prev_lmf_reg = alloc_preg (cfg);
2114 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2115 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a call to the profiler enter/leave callback FUNC with the
 * current method as argument, when ENTER_LEAVE profiling is enabled.
 * Skipped while inlining to avoid distorting profiling results.
 */
2120 emit_instrumentation_call (MonoCompile *cfg, void *func)
2122 MonoInst *iargs [1];
2125 * Avoid instrumenting inlined methods since it can
2126 * distort profiling results.
2128 if (cfg->method != cfg->current_method)
2131 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2132 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2133 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Choose the call IR opcode for a method returning TYPE: the family
 * (VOIDCALL/CALL/LCALL/RCALL/FCALL/VCALL) follows the return type, the
 * variant follows the call kind (_REG for calli, _MEMBASE for virtual,
 * plain for direct calls).
 */
2138 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2141 type = mini_get_underlying_type (cfg, type);
2142 switch (type->type) {
2143 case MONO_TYPE_VOID:
2144 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2155 case MONO_TYPE_FNPTR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2157 case MONO_TYPE_CLASS:
2158 case MONO_TYPE_STRING:
2159 case MONO_TYPE_OBJECT:
2160 case MONO_TYPE_SZARRAY:
2161 case MONO_TYPE_ARRAY:
2162 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2165 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2168 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2170 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2172 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 case MONO_TYPE_VALUETYPE:
/* enums reclassify through their base type */
2174 if (type->data.klass->enumtype) {
2175 type = mono_class_enum_basetype (type->data.klass);
2178 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2179 case MONO_TYPE_TYPEDBYREF:
2180 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2181 case MONO_TYPE_GENERICINST:
/* reclassify generic instances through their container class */
2182 type = &type->data.generic_class->container_class->byval_arg;
2185 case MONO_TYPE_MVAR:
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2189 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2195 * target_type_is_incompatible:
2196 * @cfg: MonoCompile context
2198 * Check that the item @arg on the evaluation stack can be stored
2199 * in the target type (can be a local, or field, etc).
2200 * The cfg arg can be used to check if we need verification or just
2203 * Returns: non-0 value if arg can't be stored on a target.
2206 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2208 MonoType *simple_type;
2211 if (target->byref) {
2212 /* FIXME: check that the pointed to types match */
2213 if (arg->type == STACK_MP)
2214 return arg->klass != mono_class_from_mono_type (target);
2215 if (arg->type == STACK_PTR)
2220 simple_type = mini_get_underlying_type (cfg, target);
2221 switch (simple_type->type) {
2222 case MONO_TYPE_VOID:
2230 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2234 /* STACK_MP is needed when setting pinned locals */
2235 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2240 case MONO_TYPE_FNPTR:
2242 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2243 * in native int. (#688008).
2245 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2248 case MONO_TYPE_CLASS:
2249 case MONO_TYPE_STRING:
2250 case MONO_TYPE_OBJECT:
2251 case MONO_TYPE_SZARRAY:
2252 case MONO_TYPE_ARRAY:
2253 if (arg->type != STACK_OBJ)
2255 /* FIXME: check type compatibility */
2259 if (arg->type != STACK_I8)
2263 if (arg->type != cfg->r4_stack_type)
2267 if (arg->type != STACK_R8)
/* valuetypes also require the classes to match exactly */
2270 case MONO_TYPE_VALUETYPE:
2271 if (arg->type != STACK_VTYPE)
2273 klass = mono_class_from_mono_type (simple_type);
2274 if (klass != arg->klass)
2277 case MONO_TYPE_TYPEDBYREF:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_GENERICINST:
2285 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 if (klass != arg->klass)
2293 if (arg->type != STACK_OBJ)
2295 /* FIXME: check type compatibility */
/* type variables only occur here under generic sharing */
2299 case MONO_TYPE_MVAR:
2300 g_assert (cfg->generic_sharing_context);
2301 if (mini_type_var_is_vt (cfg, simple_type)) {
2302 if (arg->type != STACK_VTYPE)
2305 if (arg->type != STACK_OBJ)
2310 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2316 * Prepare arguments for passing to a function call.
2317 * Return a non-zero value if the arguments can't be passed to the given
2319 * The type checks are not yet complete and some conversions may need
2320 * casts on 32 or 64 bit architectures.
2322 * FIXME: implement this using target_type_is_incompatible ()
2325 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2327 MonoType *simple_type;
/* an implicit 'this' must be an object/managed pointer/native int */
2331 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2335 for (i = 0; i < sig->param_count; ++i) {
2336 if (sig->params [i]->byref) {
2337 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2341 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2343 switch (simple_type->type) {
2344 case MONO_TYPE_VOID:
2353 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2359 case MONO_TYPE_FNPTR:
2360 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2363 case MONO_TYPE_CLASS:
2364 case MONO_TYPE_STRING:
2365 case MONO_TYPE_OBJECT:
2366 case MONO_TYPE_SZARRAY:
2367 case MONO_TYPE_ARRAY:
2368 if (args [i]->type != STACK_OBJ)
2373 if (args [i]->type != STACK_I8)
2377 if (args [i]->type != cfg->r4_stack_type)
2381 if (args [i]->type != STACK_R8)
2384 case MONO_TYPE_VALUETYPE:
/* enums reclassify through their base type */
2385 if (simple_type->data.klass->enumtype) {
2386 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2389 if (args [i]->type != STACK_VTYPE)
2392 case MONO_TYPE_TYPEDBYREF:
2393 if (args [i]->type != STACK_VTYPE)
2396 case MONO_TYPE_GENERICINST:
/* reclassify generic instances through their container class */
2397 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2400 case MONO_TYPE_MVAR:
2402 if (args [i]->type != STACK_VTYPE)
2406 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *CALL_MEMBASE (virtual call) opcode to the corresponding direct
 * call opcode; asserts on anything else.
 */
2414 callvirt_to_call (int opcode)
2417 case OP_CALL_MEMBASE:
2419 case OP_VOIDCALL_MEMBASE:
2421 case OP_FCALL_MEMBASE:
2423 case OP_RCALL_MEMBASE:
2425 case OP_VCALL_MEMBASE:
2427 case OP_LCALL_MEMBASE:
2430 g_assert_not_reached ();
2436 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emit IR materializing the IMT argument for CALL — either the supplied
 * IMT_ARG or a constant for METHOD — and wire it to the architecture's
 * IMT register (or just keep it alive when no such register exists).
 * LLVM compilation uses a dedicated path setting call->imt_arg_reg.
 */
2438 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2442 if (COMPILE_LLVM (cfg)) {
2443 method_reg = alloc_preg (cfg);
2446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2447 } else if (cfg->compile_aot) {
2448 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2451 MONO_INST_NEW (cfg, ins, OP_PCONST);
2452 ins->inst_p0 = method;
2453 ins->dreg = method_reg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2458 call->imt_arg_reg = method_reg;
2460 #ifdef MONO_ARCH_IMT_REG
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2463 /* Need this to keep the IMT arg alive */
2464 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: same constant materialization, then the IMT register */
2469 #ifdef MONO_ARCH_IMT_REG
2470 method_reg = alloc_preg (cfg);
2473 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2474 } else if (cfg->compile_aot) {
2475 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2478 MONO_INST_NEW (cfg, ins, OP_PCONST);
2479 ins->inst_p0 = method;
2480 ins->dreg = method_reg;
2481 MONO_ADD_INS (cfg->cbb, ins);
2484 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* no IMT register on this architecture: delegate to the backend */
2486 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from mempool MP describing a patch of the
 *   given TYPE with TARGET as its data.  Memory is owned by MP; the
 *   caller must not free it.
 */
2490 static MonoJumpInfo *
2491 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2493 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2497 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   When compiling with a generic sharing context, return the
 *   context-usage flags for KLASS via mono_class_check_context_used ().
 *   (The non-sharing fallback is not visible in this view — presumably 0.)
 */
2503 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2505 if (cfg->generic_sharing_context)
2506 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Method-level twin of mini_class_check_context_used (): under generic
 *   sharing, delegate to mono_method_check_context_used ().
 */
2512 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2514 if (cfg->generic_sharing_context)
2515 return mono_method_check_context_used (method);
2521 * check_method_sharing:
2523 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are returned through OUT_PASS_VTABLE / OUT_PASS_MRGCTX, either
 * of which may be NULL if the caller does not care.  pass_vtable and
 * pass_mrgctx are mutually exclusive (see the g_assert below).
 */
2526 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2528 gboolean pass_vtable = FALSE;
2529 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2531 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2532 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2533 gboolean sharable = FALSE;
2535 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2538 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2539 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2540 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2542 sharable = sharing_enabled && context_sharable;
2546 * Pass vtable iff target method might
2547 * be shared, which means that sharing
2548 * is enabled for its class and its
2549 * context is sharable (and it's not a
/* ... generic method — those take an mrgctx instead, below. */
2552 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) may need an mrgctx instead. */
2556 if (mini_method_get_context (cmethod) &&
2557 mini_method_get_context (cmethod)->method_inst) {
2558 g_assert (!pass_vtable);
2560 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2563 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2564 MonoGenericContext *context = mini_method_get_context (cmethod);
2565 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2567 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always need the mrgctx. */
2569 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2574 if (out_pass_vtable)
2575 *out_pass_vtable = pass_vtable;
2576 if (out_pass_mrgctx)
2577 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build (but do not add to the basic block) a MonoCallInst for a call
 *   with signature SIG and arguments ARGS.  CALLI/VIRTUAL/TAIL/RGCTX/
 *   UNBOX_TRAMPOLINE select the call flavor.  Handles:
 *     - tail calls (OP_TAILCALL, after emitting the method-leave probe);
 *     - valuetype returns, via cfg->vret_addr or a fresh temp whose
 *       address is exposed through OP_OUTARG_VTRETADDR;
 *     - soft-float r4 arguments, converted before the call sequence;
 *     - final lowering through LLVM or mono_arch_emit_call ().
 *   The caller is responsible for MONO_ADD_INS'ing the returned call.
 */
2580 inline static MonoCallInst *
2581 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2582 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2586 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls bypass the epilogue, so emit the leave-probe here. */
2591 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2593 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2595 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2598 call->signature = sig;
2599 call->rgctx_reg = rgctx;
2600 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2602 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2605 if (mini_type_is_vtype (cfg, sig_ret)) {
2606 call->vret_var = cfg->vret_addr;
2607 //g_assert_not_reached ();
2609 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2610 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2613 temp->backend.is_pinvoke = sig->pinvoke;
2616 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2617 * address of return value to increase optimization opportunities.
2618 * Before vtype decomposition, the dreg of the call ins itself represents the
2619 * fact the call modifies the return value. After decomposition, the call will
2620 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2621 * will be transformed into an LDADDR.
2623 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2624 loada->dreg = alloc_preg (cfg);
2625 loada->inst_p0 = temp;
2626 /* We reference the call too since call->dreg could change during optimization */
2627 loada->inst_p1 = call;
2628 MONO_ADD_INS (cfg->cbb, loada);
2630 call->inst.dreg = temp->dreg;
2632 call->vret_var = loada;
2633 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2634 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2636 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2637 if (COMPILE_SOFT_FLOAT (cfg)) {
2639 * If the call has a float argument, we would need to do an r8->r4 conversion using
2640 * an icall, but that cannot be done during the call sequence since it would clobber
2641 * the call registers + the stack. So we do it before emitting the call.
2643 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2645 MonoInst *in = call->args [i];
2647 if (i >= sig->hasthis)
2648 t = sig->params [i - sig->hasthis];
2650 t = &mono_defaults.int_class->byval_arg;
2651 t = mono_type_get_underlying_type (t);
2653 if (!t->byref && t->type == MONO_TYPE_R4) {
2654 MonoInst *iargs [1];
2658 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2660 /* The result will be in an int vreg */
2661 call->args [i] = conv;
2667 call->need_unbox_trampoline = unbox_trampoline;
2670 if (COMPILE_LLVM (cfg))
2671 mono_llvm_emit_call (cfg, call);
2673 mono_arch_emit_call (cfg, call);
2675 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-args area and mark the cfg as containing calls. */
2678 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2679 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument in RGCTX_REG to CALL.  On architectures
 *   with a dedicated MONO_ARCH_RGCTX_REG the value is wired as an
 *   out-argument register; otherwise only call->rgctx_arg_reg is set.
 */
2685 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2687 #ifdef MONO_ARCH_RGCTX_REG
2688 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2689 cfg->uses_rgctx_reg = TRUE;
2690 call->rgctx_reg = TRUE;
2692 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG.  Optionally
 *   passes an IMT argument and/or an rgctx argument.  For managed-to-
 *   native pinvoke wrappers with calling-convention checking enabled,
 *   brackets the call with OP_GET_SP/OP_SET_SP and throws
 *   ExecutionEngineException if the callee left the stack unbalanced.
 */
2699 inline static MonoInst*
2700 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2705 gboolean check_sp = FALSE;
2707 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2708 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2710 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx arg into a fresh vreg so it survives until the call. */
2715 rgctx_reg = mono_alloc_preg (cfg);
2716 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the pre-call stack pointer for the balance check below. */
2720 if (!cfg->stack_inbalance_var)
2721 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2723 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2724 ins->dreg = cfg->stack_inbalance_var->dreg;
2725 MONO_ADD_INS (cfg->cbb, ins);
2728 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2730 call->inst.sreg1 = addr->dreg;
2733 emit_imt_argument (cfg, call, NULL, imt_arg);
2735 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call stack pointer, compared against the saved one. */
2740 sp_reg = mono_alloc_preg (cfg);
2742 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2744 MONO_ADD_INS (cfg->cbb, ins);
2746 /* Restore the stack so we don't crash when throwing the exception */
2747 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2748 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2749 MONO_ADD_INS (cfg->cbb, ins);
2751 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2752 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2756 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2758 return (MonoInst*)call;
2762 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2765 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2767 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly tail) call to METHOD with
 *   arguments ARGS.  THIS being non-NULL selects virtual dispatch.
 *   Visible special cases:
 *     - string ctors get a duplicated signature returning string;
 *     - shared remoting-capable calls go through an rgctx-fetched
 *       remoting-invoke-with-check address (mono_emit_calli);
 *     - MulticastDelegate.Invoke calls through delegate->invoke_impl,
 *       with a dummy use keeping the delegate alive (see #667921);
 *     - non-virtual / final methods are devirtualized after a null
 *       check (callvirt_to_call);
 *     - interface methods dispatch through the IMT slot, generic
 *       virtual methods through the IMT argument, everything else
 *       through the vtable slot.
 */
2770 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2771 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2773 #ifndef DISABLE_REMOTING
2774 gboolean might_be_remote = FALSE;
2776 gboolean virtual = this != NULL;
2777 gboolean enable_for_aot = TRUE;
2781 gboolean need_unbox_trampoline;
2784 sig = mono_method_signature (method);
/* Keep the rgctx arg alive across the call setup. */
2787 rgctx_reg = mono_alloc_preg (cfg);
2788 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2791 if (method->string_ctor) {
2792 /* Create the real signature */
2793 /* FIXME: Cache these */
2794 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2795 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2800 context_used = mini_method_check_context_used (cfg, method);
2802 #ifndef DISABLE_REMOTING
2803 might_be_remote = this && sig->hasthis &&
2804 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2805 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2807 if (might_be_remote && context_used) {
2810 g_assert (cfg->generic_sharing_context);
2812 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2814 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Object/interface methods on valuetypes need the unbox trampoline. */
2818 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2820 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2822 #ifndef DISABLE_REMOTING
2823 if (might_be_remote)
2824 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2827 call->method = method;
2828 call->inst.flags |= MONO_INST_HAS_METHOD;
2829 call->inst.inst_left = this;
2830 call->tail_call = tail;
2833 int vtable_reg, slot_reg, this_reg;
2836 this_reg = this->dreg;
2838 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2839 MonoInst *dummy_use;
2841 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2843 /* Make a call to delegate->invoke_impl */
2844 call->inst.inst_basereg = this_reg;
2845 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2846 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2848 /* We must emit a dummy use here because the delegate trampoline will
2849 replace the 'this' argument with the delegate target making this activation
2850 no longer a root for the delegate.
2851 This is an issue for delegates that target collectible code such as dynamic
2852 methods of GC'able assemblies.
2854 For a test case look into #667921.
2856 FIXME: a dummy use is not the best way to do it as the local register allocator
2857 will put it on a caller save register and spil it around the call.
2858 Ideally, we would either put it on a callee save register or only do the store part.
2860 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2862 return (MonoInst*)call;
2865 if ((!cfg->compile_aot || enable_for_aot) &&
2866 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2867 (MONO_METHOD_IS_FINAL (method) &&
2868 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2869 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2871 * the method is not virtual, we just need to ensure this is not null
2872 * and then we can call the method directly.
2874 #ifndef DISABLE_REMOTING
2875 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2877 * The check above ensures method is not gshared, this is needed since
2878 * gshared methods can't have wrappers.
2880 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2884 if (!method->string_ctor)
2885 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2887 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2888 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2890 * the method is virtual, but we can statically dispatch since either
2891 * it's class or the method itself are sealed.
2892 * But first we need to ensure it's not a null reference.
2894 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2896 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on NULL this). */
2898 vtable_reg = alloc_preg (cfg);
2899 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2900 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2903 guint32 imt_slot = mono_method_get_imt_slot (method);
2904 emit_imt_argument (cfg, call, call->method, imt_arg);
2905 slot_reg = vtable_reg;
/* IMT table lives immediately before the vtable. */
2906 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2908 if (slot_reg == -1) {
2909 slot_reg = alloc_preg (cfg);
2910 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2911 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2914 slot_reg = vtable_reg;
2915 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2916 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2918 g_assert (mono_method_signature (method)->generic_param_count);
2919 emit_imt_argument (cfg, call, call->method, imt_arg);
2923 call->inst.sreg1 = slot_reg;
2924 call->inst.inst_offset = offset;
2925 call->virtual = TRUE;
2929 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2932 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2934 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own
 *   signature and no IMT/rgctx argument.
 */
2938 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2940 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   Builds the call via mono_emit_call_args () and adds it to the
 *   current basic block.
 */
2944 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2951 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2954 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2956 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and emit a native call to
 *   its wrapper.
 */
2960 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2962 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2966 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2970 * mono_emit_abs_call:
2972 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the call address; it is
 * registered in cfg->abs_patches so the PATCH_INFO_ABS resolver can
 * find it, and fptr_is_patch marks the call accordingly.
 */
2974 inline static MonoInst*
2975 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2976 MonoMethodSignature *sig, MonoInst **args)
2978 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2982 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the identity hash of registered abs patches. */
2985 if (cfg->abs_patches == NULL)
2986 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2987 g_hash_table_insert (cfg->abs_patches, ji, ji);
2988 ins = mono_emit_native_call (cfg, ji, sig, args);
2989 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be invoked directly (without their managed
 *   wrapper).  Disabled under LLVM on some targets, when single-step
 *   debugging (sdb seq points) is active, or when explicitly disabled.
 */
2994 direct_icalls_enabled (MonoCompile *cfg)
2996 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2998 if (cfg->compile_llvm)
3001 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot
 *   raise (info->no_raise) and direct icalls are enabled, the (lazily
 *   created) wrapper method is inlined instead of called, avoiding the
 *   wrapper frame; otherwise fall back to a normal wrapper call.
 */
3007 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3010 * Call the jit icall without a wrapper if possible.
3011 * The wrapper is needed for the following reasons:
3012 * - to handle exceptions thrown using mono_raise_exceptions () from the
3013 * icall function. The EH code needs the lmf frame pushed by the
3014 * wrapper to be able to unwind back to managed code.
3015 * - to be able to do stack walks for asynchronously suspended
3016 * threads when debugging.
3018 if (info->no_raise && direct_icalls_enabled (cfg)) {
3022 if (!info->wrapper_method) {
3023 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3024 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method to other threads before use. */
3026 mono_memory_barrier ();
3030 * Inline the wrapper method, which is basically a call to the C icall, and
3031 * an exception check.
3033 costs = inline_method (cfg, info->wrapper_method, NULL,
3034 args, NULL, cfg->real_offset, TRUE, out_cbb);
3035 g_assert (costs > 0);
3036 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3040 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend a small-integer call result.  Needed for pinvoke
 *   (and LLVM) calls because native code might return sub-register
 *   integers with garbage in the upper bits.
 */
3045 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3047 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3048 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3052 * Native code might return non register sized integers
3053 * without initializing the upper bits.
/* Pick the widening conversion matching the return's load size. */
3055 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3056 case OP_LOADI1_MEMBASE:
3057 widen_op = OP_ICONV_TO_I1;
3059 case OP_LOADU1_MEMBASE:
3060 widen_op = OP_ICONV_TO_U1;
3062 case OP_LOADI2_MEMBASE:
3063 widen_op = OP_ICONV_TO_I2;
3065 case OP_LOADU2_MEMBASE:
3066 widen_op = OP_ICONV_TO_U2;
3072 if (widen_op != -1) {
3073 int dreg = alloc_preg (cfg);
3076 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3077 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Lazily resolve and cache the managed String.memcpy(3) helper from
 *   corlib; errors out if corlib is too old to have it.
 */
3087 get_memcpy_method (void)
3089 static MonoMethod *memcpy_method = NULL;
3090 if (!memcpy_method) {
3091 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3093 g_error ("Old corlib found. Install a new one");
3095 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively OR into *WB_BITMAP one bit per pointer-sized slot of
 *   KLASS (at byte OFFSET from the start) that holds a managed
 *   reference; nested valuetypes with references are descended into.
 */
3099 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3101 MonoClassField *field;
3102 gpointer iter = NULL;
3104 while ((field = mono_class_get_fields (klass, &iter))) {
3107 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header. */
3109 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3110 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3111 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3112 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3114 MonoClass *field_class = mono_class_from_mono_type (field->type);
3115 if (field_class->has_references)
3116 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR.
 *   Fast paths (when available): the arch OP_CARD_TABLE_WBARRIER
 *   opcode, or inline card-table marking (shift, mask, store 1 byte).
 *   Fallback: call the GC's managed write-barrier method.  A dummy use
 *   of VALUE is emitted at the end (reason not visible in this view —
 *   presumably to keep it alive across the barrier).
 */
3122 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3124 int card_table_shift_bits;
3125 gpointer card_table_mask;
3127 MonoInst *dummy_use;
3128 int nursery_shift_bits;
3129 size_t nursery_size;
3130 gboolean has_card_table_wb = FALSE;
3132 if (!cfg->gen_write_barriers)
3135 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3137 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3139 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3140 has_card_table_wb = TRUE;
3143 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3146 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3147 wbarrier->sreg1 = ptr->dreg;
3148 wbarrier->sreg2 = value->dreg;
3149 MONO_ADD_INS (cfg->cbb, wbarrier);
3150 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3151 int offset_reg = alloc_preg (cfg);
3152 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked. */
3155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3156 if (card_table_mask)
3157 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3159 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3160 * IMM's larger than 32bits.
3162 if (cfg->compile_aot) {
3163 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3165 MONO_INST_NEW (cfg, ins, OP_PCONST);
3166 ins->inst_p0 = card_table;
3167 ins->dreg = card_reg;
3168 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: card_table[ptr >> shift] = 1. */
3171 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3172 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3174 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3175 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3178 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Copy SIZE bytes of KLASS (a type that may contain references) from
 *   iargs[1] to iargs[0], emitting write barriers for reference slots
 *   as determined by create_write_barrier_bitmap ().  Bails out (the
 *   early returns are elided in this view) when alignment is below
 *   pointer size or SIZE exceeds the 32-slot bitmap.  Copies larger
 *   than 5 pointers are delegated to the
 *   mono_gc_wbarrier_value_copy_bitmap icall; smaller ones are
 *   unrolled pointer-by-pointer, with a sub-pointer-size tail copied
 *   barrier-free.
 */
3182 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3184 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3185 unsigned need_wb = 0;
3190 /*types with references can't have alignment smaller than sizeof(void*) */
3191 if (align < SIZEOF_VOID_P)
3194 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3195 if (size > 32 * SIZEOF_VOID_P)
3198 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3200 /* We don't unroll more than 5 stores to avoid code bloat. */
3201 if (size > 5 * SIZEOF_VOID_P) {
3202 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3203 size += (SIZEOF_VOID_P - 1);
3204 size &= ~(SIZEOF_VOID_P - 1);
3206 EMIT_NEW_ICONST (cfg, iargs [2], size);
3207 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3208 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3212 destreg = iargs [0]->dreg;
3213 srcreg = iargs [1]->dreg;
3216 dest_ptr_reg = alloc_preg (cfg);
3217 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced as slots are stored. */
3220 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3222 while (size >= SIZEOF_VOID_P) {
3223 MonoInst *load_inst;
3224 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3225 load_inst->dreg = tmp_reg;
3226 load_inst->inst_basereg = srcreg;
3227 load_inst->inst_offset = offset;
3228 MONO_ADD_INS (cfg->cbb, load_inst);
3230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots flagged in the bitmap (elided test). */
3233 emit_write_barrier (cfg, iargs [0], load_inst);
3235 offset += SIZEOF_VOID_P;
3236 size -= SIZEOF_VOID_P;
3239 /*tmp += sizeof (void*)*/
3240 if (size >= SIZEOF_VOID_P) {
3241 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3242 MONO_ADD_INS (cfg->cbb, iargs [0]);
3246 /* Those cannot be references since size < sizeof (void*) */
3248 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3249 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3255 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3256 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3262 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3272 * Emit code to copy a valuetype of type @klass whose address is stored in
3273 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Handles gsharedvt classes (size/memcpy fetched from the rgctx),
 * write-barrier-aware copies for reference-bearing types, an inline
 * memcpy intrinsic for small sizes, and a managed memcpy call
 * otherwise.  NATIVE selects native layout sizes (no references).
 */
3276 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3278 MonoInst *iargs [4];
3281 MonoMethod *memcpy_method;
3282 MonoInst *size_ins = NULL;
3283 MonoInst *memcpy_ins = NULL;
3286 if (cfg->generic_sharing_context)
3287 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3290 * This check breaks with spilled vars... need to handle it during verification anyway.
3291 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime. */
3294 if (mini_is_gsharedvt_klass (cfg, klass)) {
3296 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3297 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3301 n = mono_class_native_size (klass, &align);
3303 n = mono_class_value_size (klass, &align);
3305 /* if native is true there should be no references in the struct */
3306 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3307 /* Avoid barriers when storing to the stack */
3308 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3309 (dest->opcode == OP_LDADDR))) {
3315 context_used = mini_class_check_context_used (cfg, klass);
3317 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3318 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3320 } else if (context_used) {
3321 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3323 if (cfg->compile_aot) {
3324 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3326 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3327 mono_class_compute_gc_descriptor (klass);
3332 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3334 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small fixed sizes. */
3339 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3340 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3341 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3346 iargs [2] = size_ins;
3348 EMIT_NEW_ICONST (cfg, iargs [2], n);
3350 memcpy_method = get_memcpy_method ();
/* gsharedvt uses an indirect call through the rgctx-provided memcpy. */
3352 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3354 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily resolve and cache the managed String.memset(3) helper from
 *   corlib; errors out if corlib is too old to have it.
 */
3359 get_memset_method (void)
3361 static MonoMethod *memset_method = NULL;
3362 if (!memset_method) {
3363 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3365 g_error ("Old corlib found. Install a new one");
3367 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at the
 *   address in DEST.  gsharedvt classes call a runtime-provided bzero
 *   through the rgctx; small fixed-size types use the inline memset
 *   intrinsic; everything else calls the managed memset helper.
 */
3371 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3373 MonoInst *iargs [3];
3376 MonoMethod *memset_method;
3377 MonoInst *size_ins = NULL;
3378 MonoInst *bzero_ins = NULL;
3379 static MonoMethod *bzero_method;
3381 /* FIXME: Optimize this for the case when dest is an LDADDR */
3382 mono_class_init (klass);
3383 if (mini_is_gsharedvt_klass (cfg, klass)) {
3384 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3385 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* bzero_method is cached in a function-local static. */
3387 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3388 g_assert (bzero_method);
3390 iargs [1] = size_ins;
3391 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3395 n = mono_class_value_size (klass, &align);
3397 if (n <= sizeof (gpointer) * 8) {
3398 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3401 memset_method = get_memset_method ();
3403 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3404 EMIT_NEW_ICONST (cfg, iargs [2], n);
3405 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for METHOD.  Source
 *   depends on the method kind:
 *     - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD): the mrgctx
 *       variable;
 *     - static / valuetype methods: the vtable variable (for inflated
 *       generic methods, the vtable is loaded out of the mrgctx);
 *     - otherwise: the vtable of the 'this' argument.
 */
3410 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3412 MonoInst *this = NULL;
3414 g_assert (cfg->generic_sharing_context);
3416 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3417 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3418 !method->klass->valuetype)
3419 EMIT_NEW_ARGLOAD (cfg, this, 0);
3421 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3422 MonoInst *mrgctx_loc, *mrgctx_var;
3425 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3427 mrgctx_loc = mono_get_vtable_var (cfg);
3428 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3431 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3432 MonoInst *vtable_loc, *vtable_var;
3436 vtable_loc = mono_get_vtable_var (cfg);
3437 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3439 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3440 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an mrgctx; extract its class_vtable. */
3443 vtable_reg = alloc_preg (cfg);
3444 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3445 vtable_var->type = STACK_PTR;
/* Instance method: read the vtable out of 'this'. */
3453 vtable_reg = alloc_preg (cfg);
3454 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) an rgctx-entry descriptor: METHOD, whether the
 *   lookup goes through an mrgctx, an embedded MonoJumpInfo of
 *   PATCH_TYPE/PATCH_DATA, and the requested INFO_TYPE.
 */
3459 static MonoJumpInfoRgctxEntry *
3460 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3462 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3463 res->method = method;
3464 res->in_mrgctx = in_mrgctx;
3465 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3466 res->data->type = patch_type;
3467 res->data->data.target = patch_data;
3468 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline, passing RGCTX and
 *   the patch-described ENTRY; returns the fetched slot value.
 */
3473 static inline MonoInst*
3474 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3476 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR to fetch the RGCTX_TYPE property of KLASS from the runtime
 *   generic context of the current method.
 */
3480 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3481 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3483 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3484 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3486 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR to fetch the RGCTX_TYPE property of signature SIG from the
 *   runtime generic context of the current method.
 */
3490 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3491 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3493 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3494 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3496 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR to fetch gsharedvt call info (SIG + CMETHOD pair) of kind
 *   RGCTX_TYPE from the runtime generic context.
 */
3500 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3501 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3503 MonoJumpInfoGSharedVtCall *call_info;
3504 MonoJumpInfoRgctxEntry *entry;
3507 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3508 call_info->sig = sig;
3509 call_info->method = cmethod;
3511 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3512 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3514 return emit_rgctx_fetch (cfg, rgctx, entry);
3518 * emit_get_rgctx_virt_method:
3520 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * Emits an rgctx fetch keyed by a (KLASS, VIRT_METHOD) pair allocated
 * from the cfg mempool.
 */
3523 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3524 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3526 MonoJumpInfoVirtMethod *info;
3527 MonoJumpInfoRgctxEntry *entry;
3530 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3531 info->klass = klass;
3532 info->method = virt_method;
3534 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3535 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3537 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR to fetch the gsharedvt per-method INFO
 *   (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO) for CMETHOD from the
 *   runtime generic context.
 */
3541 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3542 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3544 MonoJumpInfoRgctxEntry *entry;
3547 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3548 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3550 return emit_rgctx_fetch (cfg, rgctx, entry);
3554 * emit_get_rgctx_method:
3556 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3557 * normal constants, else emit a load from the rgctx.
3560 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3561 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No sharing context in use: the method is known at compile time. */
3563 if (!context_used) {
3566 switch (rgctx_type) {
3567 case MONO_RGCTX_INFO_METHOD:
3568 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3570 case MONO_RGCTX_INFO_METHOD_RGCTX:
3571 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3574 g_assert_not_reached ();
3577 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3578 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3580 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR that loads the property RGCTX_TYPE of FIELD from the runtime
 * generic context of the current method.
 */
3585 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3586 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3588 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3589 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3591 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the index of the entry (RGCTX_TYPE, DATA) in the gsharedvt info
 * template of the current method, adding it if not already present.
 * NOTE(review): excerpt elides braces and the declarations of `i`/`idx`.
 */
3595 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3597 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3598 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot when possible; LOCAL_OFFSET entries are never deduplicated. */
3603 for (i = 0; i < info->num_entries; ++i) {
3604 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3606 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array geometrically (doubling, starting at 16). The old
 * array is mempool-owned, so it is simply abandoned, not freed. */
3610 if (info->num_entries == info->count_entries) {
3611 MonoRuntimeGenericContextInfoTemplate *new_entries;
3612 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3614 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3616 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3617 info->entries = new_entries;
3618 info->count_entries = new_count_entries;
/* Append the new template entry and return its index (via idx). */
3621 idx = info->num_entries;
3622 template = &info->entries [idx];
3623 template->info_type = rgctx_type;
3624 template->data = data;
3626 info->num_entries ++;
3632 * emit_get_gsharedvt_info:
3634 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3637 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or add) the slot for (data, rgctx_type) in the method's info template. */
3642 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3643 /* Load info->entries [idx] */
3644 dreg = alloc_preg (cfg);
3646 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class (its byval type). */
3651 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3653 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic-class-init trampoline for KLASS, passing its
 * vtable (via rgctx when the class is context-dependent, otherwise as a
 * constant). NOTE(review): excerpt elides several lines (declarations,
 * if/else keywords); comments only.
 */
3657 * On return the caller must check @klass for load errors.
3660 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3662 MonoInst *vtable_arg;
3666 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable must be looked up through the rgctx at runtime. */
3669 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3670 klass, MONO_RGCTX_INFO_VTABLE);
3672 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3676 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature for this helper. */
3679 if (COMPILE_LLVM (cfg))
3680 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3682 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3683 #ifdef MONO_ARCH_VTABLE_REG
/* On arches with a dedicated vtable register, pass the vtable in it. */
3684 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3685 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a sequence point at IL offset IP for debugger support; only for the
 * outermost method being compiled (not for inlined callees).
 */
3692 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3696 if (cfg->gen_seq_points && cfg->method == method) {
3697 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Record that the IL evaluation stack is non-empty at this point. */
3699 ins->flags |= MONO_INST_NONEMPTY_STACK;
3700 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, store the source and target classes of a
 * cast into the JIT TLS area so a failing cast can produce a detailed
 * message. No-op otherwise. NOTE(review): excerpt elides several lines
 * (declarations, conditionals); comments only.
 */
3705 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3707 if (mini_get_debug_options ()->better_cast_details) {
3708 int vtable_reg = alloc_preg (cfg);
3709 int klass_reg = alloc_preg (cfg);
3710 MonoBasicBlock *is_null_bb = NULL;
3712 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for a null object. */
3715 NEW_BBLOCK (cfg, is_null_bb);
3717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3721 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): stray trailing "." after \n in this message — candidate fix
 * once the full function is in view. */
3723 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3727 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the "from" class of the cast. */
3728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3731 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* The "to" class may need an rgctx lookup under generic sharing. */
3733 context_used = mini_class_check_context_used (cfg, klass);
3735 MonoInst *class_ins;
3737 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3738 to_klass_reg = class_ins->dreg;
3740 to_klass_reg = alloc_preg (cfg);
3741 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3746 MONO_START_BB (cfg, is_null_bb);
/* Report the (possibly new) current basic block back to the caller. */
3748 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 * Clear the cast-details slots written by save_cast_details () so stale data
 * is not reported for a later, unrelated cast failure.
 */
3754 reset_cast_details (MonoCompile *cfg)
3756 /* Reset the variables holding the cast details */
3757 if (mini_get_debug_options ()->better_cast_details) {
3758 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3760 MONO_ADD_INS (cfg->cbb, tls_get);
3761 /* It is enough to reset the from field */
3762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 * Emit IR that verifies OBJ is an instance of ARRAY_CLASS and throws
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * NOTE(review): excerpt elides lines (declarations, else branches);
 * comments only.
 */
3767 * On return the caller must check @array_class for load errors
3770 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3772 int vtable_reg = alloc_preg (cfg);
3775 context_used = mini_class_check_context_used (cfg, array_class);
3777 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: doubles as the null check on obj. */
3779 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code compares classes (vtables are per-domain). */
3781 if (cfg->opt & MONO_OPT_SHARED) {
3782 int class_reg = alloc_preg (cfg);
3783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3784 if (cfg->compile_aot) {
3785 int klass_reg = alloc_preg (cfg);
3786 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3787 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable through the rgctx. */
3791 } else if (context_used) {
3792 MonoInst *vtable_ins;
3794 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3795 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3797 if (cfg->compile_aot) {
3801 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3803 vt_reg = alloc_preg (cfg);
3804 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3805 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3808 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch in the comparisons above raises the exception here. */
3814 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3816 reset_cast_details (cfg);
3820 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3821 * generic code is generated.
3824 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Delegate the unbox to the runtime-provided Nullable<T>.Unbox method. */
3826 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3829 MonoInst *rgctx, *addr;
3831 /* FIXME: What if the class is shared? We might not
3832 have to get the address of the method from the
/* Shared code: obtain the method address via rgctx and do an indirect call. */
3834 addr = emit_get_rgctx_method (cfg, context_used, method,
3835 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3837 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3839 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: call directly, passing the vtable when required. */
3841 gboolean pass_vtable, pass_mrgctx;
3842 MonoInst *rgctx_arg = NULL;
3844 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3845 g_assert (!pass_mrgctx);
3848 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3851 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3854 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 * Emit IR for CEE_UNBOX: type-check the boxed object on the stack and
 * compute the address of its payload (just past the MonoObject header).
 * NOTE(review): excerpt elides lines (declarations, else branches);
 * comments only.
 */
3859 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3863 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3864 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3865 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3866 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3868 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3869 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3870 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3872 /* FIXME: generics */
3873 g_assert (klass->rank == 0);
/* Unboxing an array object is always invalid. */
3876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3877 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3879 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
3883 MonoInst *element_class;
3885 /* This assertion is from the unboxcast insn */
3886 g_assert (klass->rank == 0);
3888 element_class = emit_get_rgctx_klass (cfg, context_used,
3889 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3891 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3892 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3894 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3895 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3896 reset_cast_details (cfg);
/* Result is a managed pointer to the value stored after the object header. */
3899 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3900 MONO_ADD_INS (cfg->cbb, add);
3901 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 * Emit IR for unboxing when KLASS is a gsharedvt type whose kind (ref,
 * vtype or Nullable) is only known at runtime: branch on the runtime
 * CLASS_BOX_TYPE value and produce, in all cases, an address holding the
 * unboxed value. NOTE(review): excerpt elides lines (declarations, some
 * control flow); comments only.
 */
3908 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3910 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3911 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3915 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type-check via the castclass_unbox icall before taking any address. */
3921 args [1] = klass_inst;
3924 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3926 NEW_BBLOCK (cfg, is_ref_bb);
3927 NEW_BBLOCK (cfg, is_nullable_bb);
3928 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch: 1 = reference type, 2 = Nullable, otherwise plain vtype. */
3929 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3930 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3931 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3934 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3936 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3937 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload sits right after the MonoObject header. */
3941 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3942 MONO_ADD_INS (cfg->cbb, addr);
3944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3947 MONO_START_BB (cfg, is_ref_bb);
3949 /* Save the ref to a temporary */
3950 dreg = alloc_ireg (cfg);
3951 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3952 addr->dreg = addr_reg;
3953 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3957 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call the runtime Nullable unbox helper through a
 * hand-built one-argument signature (the real method cannot be
 * constructed at JIT time for a gsharedvt instantiation). */
3960 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3961 MonoInst *unbox_call;
3962 MonoMethodSignature *unbox_sig;
3964 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3965 unbox_sig->ret = &klass->byval_arg;
3966 unbox_sig->param_count = 1;
3967 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3968 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3970 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3971 addr->dreg = addr_reg;
3974 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3977 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken branch produced. */
3980 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3982 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 * Emit IR that allocates an instance of KLASS (optionally for boxing),
 * choosing between the managed GC allocator, specialized icalls, and the
 * generic mono_object_new path depending on sharing mode, AOT, and what is
 * known about the instance size. NOTE(review): excerpt elides many lines
 * (declarations, else branches); comments only.
 */
3988 * Returns NULL and set the cfg exception on error.
3991 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3993 MonoInst *iargs [2];
/* --- context_used path: the vtable/class must come from the rgctx --- */
3999 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-dependent instance size. */
4000 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
4002 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4004 if (cfg->opt & MONO_OPT_SHARED)
4005 rgctx_info = MONO_RGCTX_INFO_KLASS;
4007 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4008 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4010 if (cfg->opt & MONO_OPT_SHARED) {
4011 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4013 alloc_ftn = mono_object_new;
4016 alloc_ftn = mono_object_new_specific;
/* Prefer the inlined managed allocator when available and not domain-shared. */
4019 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4020 if (known_instance_size) {
4021 int size = mono_class_instance_size (klass);
4023 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4025 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4028 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4031 if (cfg->opt & MONO_OPT_SHARED) {
4032 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4033 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4035 alloc_ftn = mono_object_new;
4036 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4037 /* This happens often in argument checking code, eg. throw new FooException... */
4038 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4039 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4040 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4042 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4043 MonoMethod *managed_alloc = NULL;
/* A missing vtable means the class failed to load: record a TypeLoadException. */
4047 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4048 cfg->exception_ptr = klass;
4052 #ifndef MONO_CROSS_COMPILE
4053 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4056 if (managed_alloc) {
4057 int size = mono_class_instance_size (klass);
4059 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4060 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4061 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fallback: runtime-selected allocation function; some take the size in
 * gpointer-sized words (pass_lw) as the first argument. */
4063 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4065 guint32 lw = vtable->klass->instance_size;
4066 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4067 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4068 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4071 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4075 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 * Emit IR for CEE_BOX of VAL (of type KLASS): Nullable<T> goes through
 * Nullable.Box, gsharedvt types branch at runtime on the box kind, and
 * plain vtypes allocate an object and copy the value after the header.
 * NOTE(review): excerpt elides many lines (declarations, else branches);
 * comments only.
 */
4079 * Returns NULL and set the cfg exception on error.
4082 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4084 MonoInst *alloc, *ins;
4086 *out_cbb = cfg->cbb;
/* Nullable<T>: delegate to the runtime-provided Box method. */
4088 if (mono_class_is_nullable (klass)) {
4089 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4092 /* FIXME: What if the class is shared? We might not
4093 have to get the method address from the RGCTX. */
4094 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4095 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4096 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4098 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4100 gboolean pass_vtable, pass_mrgctx;
4101 MonoInst *rgctx_arg = NULL;
4103 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4104 g_assert (!pass_mrgctx);
4107 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4110 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4113 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the box kind (1 = ref, 2 = Nullable, else vtype) is a runtime value. */
4117 if (mini_is_gsharedvt_klass (cfg, klass)) {
4118 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4119 MonoInst *res, *is_ref, *src_var, *addr;
4122 dreg = alloc_ireg (cfg);
4124 NEW_BBLOCK (cfg, is_ref_bb);
4125 NEW_BBLOCK (cfg, is_nullable_bb);
4126 NEW_BBLOCK (cfg, end_bb);
4127 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4128 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype branch: allocate, then store the value just past the header. */
4135 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4138 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4139 ins->opcode = OP_STOREV_MEMBASE;
4141 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4142 res->type = STACK_OBJ;
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4147 MONO_START_BB (cfg, is_ref_bb);
4149 /* val is a vtype, so has to load the value manually */
4150 src_var = get_vreg_to_inst (cfg, val->dreg);
4152 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4153 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4158 MONO_START_BB (cfg, is_nullable_bb);
4161 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4162 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4164 MonoMethodSignature *box_sig;
4167 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4168 * construct that method at JIT time, so have to do things by hand.
/* Hand-built one-parameter signature: object Box (T). */
4170 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4171 box_sig->ret = &mono_defaults.object_class->byval_arg;
4172 box_sig->param_count = 1;
4173 box_sig->params [0] = &klass->byval_arg;
4174 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4175 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4176 res->type = STACK_OBJ;
4180 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4182 MONO_START_BB (cfg, end_bb);
4184 *out_cbb = cfg->cbb;
/* Plain (non-gsharedvt) vtype: allocate and copy the payload. */
4188 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4192 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS is a generic instantiation (or, under sharing, an
 * open generic) with at least one covariant/contravariant type argument
 * that is a reference type — such casts need the variance-aware helper.
 * NOTE(review): excerpt elides braces and declarations; comments only.
 */
4198 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4201 MonoGenericContainer *container;
4202 MonoGenericInst *ginst;
4204 if (klass->generic_class) {
4205 container = klass->generic_class->container_class->generic_container;
4206 ginst = klass->generic_class->context.class_inst;
4207 } else if (klass->generic_container && context_used) {
4208 container = klass->generic_container;
4209 ginst = container->context.class_inst;
/* Scan the type arguments of the variant parameters only. */
4214 for (i = 0; i < container->type_argc; ++i) {
4216 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4218 type = ginst->type_argv [i];
4219 if (mini_type_is_reference (cfg, type))
/* Lazily-built whitelist of corlib class names whose icalls may be called
 * directly (they never raise managed exceptions from native code). */
4225 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 * Return whether CMETHOD's icall can be invoked directly instead of through
 * the exception-safe wrapper. NOTE(review): excerpt elides braces/returns;
 * comments only.
 */
4228 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4230 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4231 if (!direct_icalls_enabled (cfg))
4235 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4236 * Whitelist a few icalls for now.
/* One-time init; the store is published behind a memory barrier so racing
 * readers never see a partially-filled table. */
4238 if (!direct_icall_type_hash) {
4239 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4241 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4242 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4243 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4244 mono_memory_barrier ();
4245 direct_icall_type_hash = h;
4248 if (cmethod->klass == mono_defaults.math_class)
4250 /* No locking needed */
4251 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be decided with a simple
 * klass/vtable compare: interfaces, arrays, Nullable<T>, MarshalByRefObject
 * subclasses, sealed classes, and open generic parameters. Callers fall back
 * to the cached cast helpers in these cases. */
4256 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 * Emit a call to the castclass-with-cache marshal wrapper (args: obj, klass,
 * cache slot), bracketed by save/reset of the detailed-cast-failure info.
 * Updates *out_bblock to the current basic block.
 */
4259 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4261 MonoMethod *mono_castclass;
4264 mono_castclass = mono_marshal_get_castclass_with_cache ();
4266 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4267 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4268 reset_cast_details (cfg);
4269 *out_bblock = cfg->cbb;
/* Return a per-call-site cache index: method index in the high 16 bits,
 * a per-method counter in the low 16. */
4275 get_castclass_cache_idx (MonoCompile *cfg)
4277 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4278 cfg->castclass_cache_index ++;
4279 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 * Non-generic-sharing front end for the cached castclass helper: builds the
 * (obj, klass, cache) argument triple. Under AOT the cache slot is an
 * AOT constant keyed by a unique call-site index; otherwise it is a fresh
 * pointer-sized slot from the domain pool.
 */
4283 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4292 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4295 if (cfg->compile_aot) {
4296 idx = get_castclass_cache_idx (cfg);
4297 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4299 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4302 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4304 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
/*
 * handle_castclass:
 *
 * Emit IR for CEE_CASTCLASS of SRC to KLASS. Dispatches between the cached
 * helper (variant generics / complex classes), an inlined marshal wrapper
 * (interfaces / MarshalByRef in non-shared code), and inline vtable/klass
 * comparison sequences, with a null fast path. NOTE(review): excerpt elides
 * many lines (declarations, else branches, returns); comments only.
 */
4308 * Returns NULL and set the cfg exception on error.
4311 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4313 MonoBasicBlock *is_null_bb;
4314 int obj_reg = src->dreg;
4315 int vtable_reg = alloc_preg (cfg);
4317 MonoInst *klass_inst = NULL, *res;
4318 MonoBasicBlock *bblock;
4322 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic arguments need the cache-based helper even when not shared. */
4324 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4325 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4326 (*inline_costs) += 2;
/* Interfaces / MarshalByRef: inline the castclass marshal wrapper. */
4329 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4330 MonoMethod *mono_castclass;
4331 MonoInst *iargs [1];
4334 mono_castclass = mono_marshal_get_castclass (klass);
4337 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4338 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4339 iargs, ip, cfg->real_offset, TRUE, &bblock);
4340 reset_cast_details (cfg);
4341 CHECK_CFG_EXCEPTION;
4342 g_assert (costs > 0);
4344 cfg->real_offset += 5;
4346 (*inline_costs) += costs;
/* Shared-code complex casts: pass (obj, cached klass, cache ptr) to the helper. */
4355 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4356 MonoInst *cache_ins;
4358 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4363 /* klass - it's the second element of the cache entry*/
4364 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4367 args [2] = cache_ins;
4369 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4372 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* null always passes castclass. */
4375 NEW_BBLOCK (cfg, is_null_bb);
4377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4378 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4380 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4382 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4384 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4386 int klass_reg = alloc_preg (cfg);
4388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: one klass compare suffices (no subclass can exist). */
4390 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4391 /* the remoting code is broken, access the class for now */
4392 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4393 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4395 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4396 cfg->exception_ptr = klass;
4399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4404 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via the emitted cast sequence. */
4406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4407 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4411 MONO_START_BB (cfg, is_null_bb);
4413 reset_cast_details (cfg);
4424 * Returns NULL and set the cfg exception on error.
4427 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4430 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4431 int obj_reg = src->dreg;
4432 int vtable_reg = alloc_preg (cfg);
4433 int res_reg = alloc_ireg_ref (cfg);
4434 MonoInst *klass_inst = NULL;
4439 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4440 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4441 MonoInst *cache_ins;
4443 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4448 /* klass - it's the second element of the cache entry*/
4449 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4452 args [2] = cache_ins;
4454 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4457 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4460 NEW_BBLOCK (cfg, is_null_bb);
4461 NEW_BBLOCK (cfg, false_bb);
4462 NEW_BBLOCK (cfg, end_bb);
4464 /* Do the assignment at the beginning, so the other assignment can be if converted */
4465 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4466 ins->type = STACK_OBJ;
4469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4474 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4475 g_assert (!context_used);
4476 /* the is_null_bb target simply copies the input register to the output */
4477 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4479 int klass_reg = alloc_preg (cfg);
4482 int rank_reg = alloc_preg (cfg);
4483 int eclass_reg = alloc_preg (cfg);
4485 g_assert (!context_used);
4486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4488 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4491 if (klass->cast_class == mono_defaults.object_class) {
4492 int parent_reg = alloc_preg (cfg);
4493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4494 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4495 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4497 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4498 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4499 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4500 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4501 } else if (klass->cast_class == mono_defaults.enum_class) {
4502 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4504 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4505 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4507 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4508 /* Check that the object is a vector too */
4509 int bounds_reg = alloc_preg (cfg);
4510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4515 /* the is_null_bb target simply copies the input register to the output */
4516 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4518 } else if (mono_class_is_nullable (klass)) {
4519 g_assert (!context_used);
4520 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4521 /* the is_null_bb target simply copies the input register to the output */
4522 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4524 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4525 g_assert (!context_used);
4526 /* the remoting code is broken, access the class for now */
4527 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4528 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4530 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4531 cfg->exception_ptr = klass;
4534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4540 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4543 /* the is_null_bb target simply copies the input register to the output */
4544 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4549 MONO_START_BB (cfg, false_bb);
4551 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4554 MONO_START_BB (cfg, is_null_bb);
4556 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR implementing the (remoting-aware) CISINST opcode for KLASS against
 * the object reference SRC.  The result encoding is documented in the inline
 * comment below (0 = instance, 1 = not an instance, 2 = undeterminable proxy).
 * The proxy (value 2) paths exist only when DISABLE_REMOTING is not defined.
 *
 * NOTE(review): this listing is elided (original line numbers are embedded and
 * some lines are missing), so #else/#endif arms and braces are not all visible.
 */
4562 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4564 /* This opcode takes as input an object reference and a class, and returns:
4565 0) if the object is an instance of the class,
4566 1) if the object is not instance of the class,
4567 2) if the object is a proxy whose type cannot be determined */
/* With remoting enabled we need extra targets for the "proxy" (false2) and
 * "definitely not a proxy" (no_proxy) outcomes. */
4570 #ifndef DISABLE_REMOTING
4571 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4573 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4575 int obj_reg = src->dreg;
4576 int dreg = alloc_ireg (cfg);
4578 #ifndef DISABLE_REMOTING
4579 int klass_reg = alloc_preg (cfg);
4582 NEW_BBLOCK (cfg, true_bb);
4583 NEW_BBLOCK (cfg, false_bb);
4584 NEW_BBLOCK (cfg, end_bb);
4585 #ifndef DISABLE_REMOTING
4586 NEW_BBLOCK (cfg, false2_bb);
4587 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: branch straight to the "1" result. */
4590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: check the vtable's interface bitmap; on failure, with
 * remoting we must still distinguish a transparent proxy from a plain miss. */
4593 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4594 #ifndef DISABLE_REMOTING
4595 NEW_BBLOCK (cfg, interface_fail_bb);
4598 tmp_reg = alloc_preg (cfg);
4599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4600 #ifndef DISABLE_REMOTING
4601 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4602 MONO_START_BB (cfg, interface_fail_bb);
4603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy -> plain interface-cast failure (result 1). */
4605 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4607 tmp_reg = alloc_preg (cfg);
/* Proxy without custom type info -> type cannot be determined (result 2). */
4608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4612 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case: load the vtable/class and test the class hierarchy,
 * detouring through the proxy checks when remoting is compiled in. */
4615 #ifndef DISABLE_REMOTING
4616 tmp_reg = alloc_preg (cfg);
4617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4620 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, the remote class's proxy_class stands in for the real class. */
4621 tmp_reg = alloc_preg (cfg);
4622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4623 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4625 tmp_reg = alloc_preg (cfg);
4626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4630 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4633 MONO_START_BB (cfg, no_proxy_bb);
4635 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4637 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: each outcome block loads its constant into dreg
 * and falls through/branches to end_bb. */
4641 MONO_START_BB (cfg, false_bb);
4643 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4646 #ifndef DISABLE_REMOTING
4647 MONO_START_BB (cfg, false2_bb);
4649 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4650 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4653 MONO_START_BB (cfg, true_bb);
4655 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4657 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an ICONST-typed instruction so the caller gets
 * a STACK_I4 value.  (The ins declaration/return are elided in this listing.) */
4660 MONO_INST_NEW (cfg, ins, OP_ICONST);
4662 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the (remoting-aware) CCASTCLASS opcode: cast SRC to
 * KLASS, returning 0 on success, 1 when the object is a proxy whose type
 * cannot be determined, and throwing InvalidCastException otherwise (see the
 * inline comment below).  Proxy paths require remoting support.
 *
 * NOTE(review): listing is elided; some braces/#else arms are not visible.
 */
4668 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4670 /* This opcode takes as input an object reference and a class, and returns:
4671 0) if the object is an instance of the class,
4672 1) if the object is a proxy whose type cannot be determined
4673 an InvalidCastException exception is thrown otherwise */
4676 #ifndef DISABLE_REMOTING
4677 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4679 MonoBasicBlock *ok_result_bb;
4681 int obj_reg = src->dreg;
4682 int dreg = alloc_ireg (cfg);
4683 int tmp_reg = alloc_preg (cfg);
4685 #ifndef DISABLE_REMOTING
4686 int klass_reg = alloc_preg (cfg);
4687 NEW_BBLOCK (cfg, end_bb);
4690 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
4692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target details so a failed cast can raise a useful message. */
4695 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4697 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4698 #ifndef DISABLE_REMOTING
4699 NEW_BBLOCK (cfg, interface_fail_bb);
4701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4702 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4703 MONO_START_BB (cfg, interface_fail_bb);
4704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* If the failing object is not a transparent proxy this throws right away. */
4706 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4708 tmp_reg = alloc_preg (cfg);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4711 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (cannot determine type here). */
4713 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Without remoting: a plain interface cast that throws on failure
 * (NULL/NULL targets make mini_emit_iface_cast emit the exception itself). */
4716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4717 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4721 #ifndef DISABLE_REMOTING
4722 NEW_BBLOCK (cfg, no_proxy_bb);
4724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4726 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: test against the remote class's proxy_class instead. */
4728 tmp_reg = alloc_preg (cfg);
4729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4732 tmp_reg = alloc_preg (cfg);
4733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4734 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4735 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4737 NEW_BBLOCK (cfg, fail_1_bb);
4739 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4741 MONO_START_BB (cfg, fail_1_bb);
4743 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4744 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4746 MONO_START_BB (cfg, no_proxy_bb);
/* Non-proxy object: ordinary castclass semantics (throws on failure). */
4748 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4750 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4754 MONO_START_BB (cfg, ok_result_bb);
4756 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4758 #ifndef DISABLE_REMOTING
4759 MONO_START_BB (cfg, end_bb);
/* Package the result as a STACK_I4 value (declaration/return elided here). */
4763 MONO_INST_NEW (cfg, ins, OP_ICONST);
4765 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inline IR for Enum.HasFlag: load the enum value from ENUM_THIS,
 * AND it with ENUM_FLAG, and compare the result back against ENUM_FLAG
 * ((v & flag) == flag), yielding a STACK_I4 boolean.
 *
 * NOTE(review): the switch arms computing is_i4 and the surrounding braces
 * are elided from this listing — presumably is_i4 selects 32- vs 64-bit
 * opcodes based on the underlying enum type; confirm against the full source.
 */
4770 static G_GNUC_UNUSED MonoInst*
4771 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4773 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4774 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4777 switch (enum_type->type) {
4780 #if SIZEOF_REGISTER == 8
4792 MonoInst *load, *and, *cmp, *ceq;
/* Register widths follow is_i4: integer regs for 32-bit, long regs otherwise. */
4793 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4794 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4795 int dest_reg = alloc_ireg (cfg);
4797 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4798 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4799 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4800 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4802 ceq->type = STACK_I4;
/* Decompose the composite opcodes for back ends that need it
 * (the guarding condition is elided in this listing). */
4805 load = mono_decompose_opcode (cfg, load, NULL);
4806 and = mono_decompose_opcode (cfg, and, NULL);
4807 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4808 ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS and initializes it for
 * METHOD with TARGET as the bound object, inlining the work normally done by
 * mono_delegate_ctor ().  VIRTUAL selects the virtual-dispatch trampoline.
 */
4816 * Returns NULL and set the cfg exception on error.
4818 static G_GNUC_UNUSED MonoInst*
4819 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4823 gpointer trampoline;
4824 MonoInst *obj, *method_ins, *tramp_ins;
4828 // FIXME reenable optimisation for virtual case
4833 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out (path elided) when no virtual invoke impl exists for this shape. */
4836 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4840 obj = handle_alloc (cfg, klass, FALSE, 0);
4844 /* Inline the contents of mono_delegate_ctor */
4846 /* Set target field */
4847 /* Optimize away setting of NULL target */
4848 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4849 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into a heap object: notify the GC. */
4850 if (cfg->gen_write_barriers) {
4851 dreg = alloc_preg (cfg);
4852 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4853 emit_write_barrier (cfg, ptr, target);
4857 /* Set method field */
4858 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4862 * To avoid looking up the compiled code belonging to the target method
4863 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4864 * store it, and we fill it after the method has been compiled.
4866 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4867 MonoInst *code_slot_ins;
4870 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method_code_hash is shared per-domain state: create/lookup under the
 * domain lock. */
4872 domain = mono_domain_get ();
4873 mono_domain_lock (domain);
4874 if (!domain_jit_info (domain)->method_code_hash)
4875 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4876 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4878 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4879 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4881 mono_domain_unlock (domain);
/* AOT code cannot embed the slot address directly; use a patch instead. */
4883 if (cfg->compile_aot)
4884 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4886 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4891 if (cfg->compile_aot) {
4892 MonoDelegateClassMethodPair *del_tramp;
4894 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4895 del_tramp->klass = klass;
/* Under generic sharing the concrete method is resolved at runtime. */
4896 del_tramp->method = context_used ? NULL : method;
4897 del_tramp->virtual = virtual;
4898 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4901 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4903 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4904 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4907 /* Set invoke_impl field */
/* Two layouts depending on a condition elided here: either tramp_ins is the
 * impl pointer itself, or it points at a MonoDelegateTrampInfo to load from. */
4909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4911 dreg = alloc_preg (cfg);
4912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4915 dreg = alloc_preg (cfg);
4916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4920 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall for a RANK-dimensional array
 * allocation ('newobj' on an array ctor), passing the dimension arguments in
 * SP.  Returns the call instruction.
 */
4926 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4928 MonoJitICallInfo *info;
4930 /* Need to register the icall so it gets an icall wrapper */
4931 info = mono_get_array_new_va_icall (rank);
4933 cfg->flags |= MONO_CFG_HAS_VARARGS;
4935 /* mono_array_new_va () needs a vararg calling convention */
4936 cfg->disable_llvm = TRUE;
4938 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4939 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4943 * handle_constrained_gsharedvt_call:
4945 * Handle constrained calls where the receiver is a gsharedvt type.
4946 * Return the instruction representing the call. Set the cfg exception on failure.
4949 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4950 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4952 MonoInst *ins = NULL;
4953 MonoBasicBlock *bblock = *ref_bblock;
4954 gboolean emit_widen = *ref_emit_widen;
4957 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
4958 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4959 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of shapes is supported: Object/interface/non-corlib ref
 * targets, simple return kinds, and at most one (simple) argument. */
4961 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4962 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4963 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4964 MonoInst *args [16];
4967 * This case handles calls to
4968 * - object:ToString()/Equals()/GetHashCode(),
4969 * - System.IComparable<T>:CompareTo()
4970 * - System.IEquatable<T>:Equals ()
4971 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method; under generic sharing it is looked up
 * through the rgctx rather than embedded as a constant. */
4975 if (mono_method_check_context_used (cmethod))
4976 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4978 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4979 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4981 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4982 if (fsig->hasthis && fsig->param_count) {
4983 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4984 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4985 ins->dreg = alloc_preg (cfg);
4986 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4987 MONO_ADD_INS (cfg->cbb, ins);
/* args [3] = box-type info (or 0), args [4] = the localloc'ed arg array. */
4990 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4993 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4995 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4996 addr_reg = ins->dreg;
4997 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4999 EMIT_NEW_ICONST (cfg, args [3], 0);
5000 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5003 EMIT_NEW_ICONST (cfg, args [3], 0);
5004 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall performs the ref-vs-vtype dispatch at runtime. */
5006 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result: unbox it according to the return kind. */
5009 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
5010 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
5011 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Manual unbox: step past the MonoObject header and load the value. */
5015 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5016 MONO_ADD_INS (cfg->cbb, add);
5018 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5019 MONO_ADD_INS (cfg->cbb, ins);
5020 /* ins represents the call result */
/* Unsupported shapes fall through to a gsharedvt failure (path elided). */
5023 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5026 *ref_emit_widen = emit_widen;
5027 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes cfg->got_var at the
 * very start of the method, plus a dummy use in the exit block to keep the
 * variable live for the whole method.  No-op if there is no got_var or it was
 * already allocated.
 */
5036 mono_emit_load_got_addr (MonoCompile *cfg)
5038 MonoInst *getaddr, *dummy_use;
5040 if (!cfg->got_var || cfg->got_var_allocated)
5043 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5044 getaddr->cil_code = cfg->header->code;
5045 getaddr->dreg = cfg->got_var->dreg;
5047 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; otherwise a plain
 * append is equivalent. */
5048 if (cfg->bb_entry->code) {
5049 getaddr->next = cfg->bb_entry->code;
5050 cfg->bb_entry->code = getaddr;
5053 MONO_ADD_INS (cfg->bb_entry, getaddr);
5055 cfg->got_var_allocated = TRUE;
5058 * Add a dummy use to keep the got_var alive, since real uses might
5059 * only be generated by the back ends.
5060 * Add it to end_bblock, so the variable's lifetime covers the whole
5062 * It would be better to make the usage of the got var explicit in all
5063 * cases when the backend needs it (i.e. calls, throw etc.), so this
5064 * wouldn't be needed.
5066 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5067 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized IL-size cap for inlining (MONO_INLINELIMIT env var or
 * INLINE_LENGTH_LIMIT).  NOTE(review): initialization below is not
 * thread-safe; presumably guarded by the JIT's compilation locking —
 * confirm against the full source. */
5070 static int inline_limit;
5071 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Checks compile flags, inline depth, method attributes, IL size,
 * cctor/class-initialization constraints, declarative security and (on
 * soft-float targets) R4 usage.  Returns TRUE when inlining is allowed.
 */
5074 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5076 MonoMethodHeaderSummary header;
5078 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5079 MonoMethodSignature *sig = mono_method_signature (method);
/* Early rejections: explicit disable, generic sharing, excessive depth. */
5083 if (cfg->disable_inline)
5085 if (cfg->generic_sharing_context)
5088 if (cfg->inline_depth > 10)
5091 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* On LMF-ops targets, small icall/pinvoke stubs can be inlined. */
5092 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5093 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5094 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5099 if (!mono_method_get_header_summary (method, &header))
5102 /*runtime, icall and pinvoke are checked by summary call*/
5103 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5104 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5105 (mono_class_is_marshalbyref (method->klass)) ||
5109 /* also consider num_locals? */
5110 /* Do the size check early to avoid creating vtables */
5111 if (!inline_limit_inited) {
5112 if (g_getenv ("MONO_INLINELIMIT"))
5113 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5115 inline_limit = INLINE_LENGTH_LIMIT;
5116 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size cap. */
5118 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5122 * if we can initialize the class of the method right away, we do,
5123 * otherwise we don't allow inlining if the class needs initialization,
5124 * since it would mean inserting a call to mono_runtime_class_init()
5125 * inside the inlined code
5127 if (!(cfg->opt & MONO_OPT_SHARED)) {
5128 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5129 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5130 vtable = mono_class_vtable (cfg->domain, method->klass);
5133 if (!cfg->compile_aot)
5134 mono_runtime_class_init (vtable);
5135 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5136 if (cfg->run_cctors && method->klass->has_cctor) {
5137 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5138 if (!method->klass->runtime_info)
5139 /* No vtable created yet */
5141 vtable = mono_class_vtable (cfg->domain, method->klass);
5144 /* This makes so that inline cannot trigger */
5145 /* .cctors: too many apps depend on them */
5146 /* running with a specific order... */
5147 if (! vtable->initialized)
5149 mono_runtime_class_init (vtable);
5151 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5152 if (!method->klass->runtime_info)
5153 /* No vtable created yet */
5155 vtable = mono_class_vtable (cfg->domain, method->klass);
5158 if (!vtable->initialized)
5163 * If we're compiling for shared code
5164 * the cctor will need to be run at aot method load time, for example,
5165 * or at the end of the compilation of the inlining method.
5167 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5172 * CAS - do not inline methods with declarative security
5173 * Note: this has to be before any possible return TRUE;
5175 if (mono_security_method_has_declsec (method))
5178 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: reject methods that pass or return R4 values. */
5179 if (mono_arch_is_soft_float ()) {
5181 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5183 for (i = 0; i < sig->param_count; ++i)
5184 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Methods explicitly blacklisted for this compilation cannot be inlined. */
5189 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD on KLASS requires
 * emitting a class-initialization (cctor) call.  JIT compilation can skip it
 * when the vtable is already initialized or initialization is otherwise
 * guaranteed before the method runs.
 */
5196 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* At JIT time an already-initialized vtable settles the question; AOT code
 * cannot rely on the compile-time state. */
5198 if (!cfg->compile_aot) {
5200 if (vtable->initialized)
5204 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5205 if (cfg->method == method)
5209 if (!mono_class_needs_cctor_run (klass, method))
5212 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5213 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR of element type KLASS.  When BCHECK is set, a bounds check
 * against max_length is emitted first.  Returns the address instruction
 * (STACK_MP).  On x86/amd64, power-of-two element sizes use a single LEA.
 */
5220 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5224 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt variable-size elements are handled separately (path elided). */
5227 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5230 mono_class_init (klass);
5231 size = mono_class_array_element_size (klass);
5234 mult_reg = alloc_preg (cfg);
5235 array_reg = arr->dreg;
5236 index_reg = index->dreg;
5238 #if SIZEOF_REGISTER == 8
5239 /* The array reg is 64 bits but the index reg is only 32 */
5240 if (COMPILE_LLVM (cfg)) {
5242 index2_reg = index_reg;
5244 index2_reg = alloc_preg (cfg);
5245 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits. */
5248 if (index->type == STACK_I8) {
5249 index2_reg = alloc_preg (cfg);
5250 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5252 index2_reg = index_reg;
5257 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5259 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: base + index << log2(size) + offsetof(vector) in one LEA. */
5260 if (size == 1 || size == 2 || size == 4 || size == 8) {
5261 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5263 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5264 ins->klass = mono_class_get_element_class (klass);
5265 ins->type = STACK_MP;
/* Generic path: multiply the index by the element size (looked up through
 * the rgctx for gsharedvt), add the array base and the vector offset. */
5271 add_reg = alloc_ireg_mp (cfg);
5274 MonoInst *rgctx_ins;
5277 g_assert (cfg->generic_sharing_context);
5278 context_used = mini_class_check_context_used (cfg, klass);
5279 g_assert (context_used);
5280 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5281 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5283 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5285 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5286 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5287 ins->klass = mono_class_get_element_class (klass);
5288 ins->type = STACK_MP;
5289 MONO_ADD_INS (cfg->cbb, ins);
5294 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of the two-dimensional
 * array ARR of element type KLASS, including lower-bound adjustment and
 * bounds checks for both dimensions.  Requires native multiply support
 * (hence the MONO_ARCH_EMULATE_MUL_DIV guard above).
 */
5296 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5298 int bounds_reg = alloc_preg (cfg);
5299 int add_reg = alloc_ireg_mp (cfg);
5300 int mult_reg = alloc_preg (cfg);
5301 int mult2_reg = alloc_preg (cfg);
5302 int low1_reg = alloc_preg (cfg);
5303 int low2_reg = alloc_preg (cfg);
5304 int high1_reg = alloc_preg (cfg);
5305 int high2_reg = alloc_preg (cfg);
5306 int realidx1_reg = alloc_preg (cfg);
5307 int realidx2_reg = alloc_preg (cfg);
5308 int sum_reg = alloc_preg (cfg);
5309 int index1, index2, tmpreg;
5313 mono_class_init (klass);
5314 size = mono_class_array_element_size (klass);
5316 index1 = index_ins1->dreg;
5317 index2 = index_ins2->dreg;
5319 #if SIZEOF_REGISTER == 8
5320 /* The array reg is 64 bits but the index reg is only 32 */
5321 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes before 64-bit pointer arithmetic. */
5324 tmpreg = alloc_preg (cfg);
5325 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5327 tmpreg = alloc_preg (cfg);
5328 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5332 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5336 /* range checking */
5337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5338 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; unsigned-compare against
 * length so a negative realidx also trips the exception. */
5340 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5341 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5342 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5343 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5344 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5345 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5346 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check, reading the second MonoArrayBounds entry. */
5348 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5349 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5350 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5351 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5352 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5353 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5354 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Address = arr + ((realidx1 * length2 + realidx2) * size) + vector offset. */
5356 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5357 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5359 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5360 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5362 ins->type = STACK_MP;
5364 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the array Address/Get/Set
 * helper CMETHOD.  Rank-1 (and, when intrinsics are enabled, rank-2) arrays
 * are inlined; higher ranks call the marshal-generated Address wrapper.
 * IS_SET excludes the trailing value argument from the rank computation.
 */
5371 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5375 MonoMethod *addr_method;
5377 MonoClass *eclass = cmethod->klass->element_class;
5379 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5382 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5384 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5385 /* emit_ldelema_2 depends on OP_LMUL */
5386 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5387 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt variable-size element types cannot use the wrappers (elided). */
5391 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
5394 element_size = mono_class_array_element_size (eclass);
5395 addr_method = mono_marshal_get_array_address (rank, element_size);
5396 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
5401 static MonoBreakPolicy
5402 always_insert_breakpoint (MonoMethod *method)
5404 return MONO_BREAK_POLICY_ALWAYS;
/* Currently-installed break policy callback; defaults to always-insert. */
5407 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5410 * mono_set_break_policy:
5411 * policy_callback: the new callback function
5413 * Allow embedders to decide whether to actually obey breakpoint instructions
5414 * (both break IL instructions and Debugger.Break () method calls), for example
5415 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5416 * untrusted or semi-trusted code.
5418 * @policy_callback will be called every time a break point instruction needs to
5419 * be inserted with the method argument being the method that calls Debugger.Break()
5420 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5421 * if it wants the breakpoint to not be effective in the given method.
5422 * #MONO_BREAK_POLICY_ALWAYS is the default.
5425 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-insert policy. */
5427 if (policy_callback)
5428 break_policy_func = policy_callback;
5430 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — the identifier typo is part of the
 * established internal interface; renaming would break callers)
 *
 *   Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.
 */
5434 should_insert_brekpoint (MonoMethod *method) {
5435 switch (break_policy_func (method)) {
5436 case MONO_BREAK_POLICY_ALWAYS:
5438 case MONO_BREAK_POLICY_NEVER:
/* MONO_BREAK_POLICY_ON_DBG depended on mdb, which is no longer supported. */
5440 case MONO_BREAK_POLICY_ON_DBG:
5441 g_warning ("mdb no longer supported");
5444 g_warning ("Incorrect value returned from break policy callback");
5449 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address (bounds already checked by the caller), then copy between the
 * element and the value location args [2].  IS_SET selects the direction;
 * reference stores get a write barrier.
 */
5451 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5453 MonoInst *addr, *store, *load;
5454 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5456 /* the bounds check is already done by the callers */
5457 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> array element (with GC barrier for references). */
5459 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5460 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5461 if (mini_type_is_reference (cfg, fsig->params [2]))
5462 emit_write_barrier (cfg, addr, load);
/* get: array element -> value location. */
5464 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5465 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is treated as a reference type under the current
 * (possibly generic-shared) compilation. */
5472 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5474 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp[2] into element sp[1] of array sp[0] (stelem).
 * Reference-typed stores with SAFETY_CHECKS go through the virtual
 * stelemref helper (which performs the array covariance check); other cases
 * compute the element address and store directly, adding bounds checks and
 * write barriers as needed.
 */
5478 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference needs the covariance check: call the
 * virtual-stelemref wrapper resolved through object[]'s vtable. */
5480 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5481 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5482 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5483 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5484 MonoInst *iargs [3];
5487 mono_class_setup_vtable (obj_array);
5488 g_assert (helper->slot);
5490 if (sp [0]->type != STACK_OBJ)
5492 if (sp [2]->type != STACK_OBJ)
5499 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5503 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5506 // FIXME-VT: OP_ICONST optimization
5507 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5508 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5509 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
5510 } else if (sp [1]->opcode == OP_ICONST) {
5511 int array_reg = sp [0]->dreg;
5512 int index_reg = sp [1]->dreg;
5513 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5516 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5517 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5519 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5520 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5521 if (generic_class_is_reference_type (cfg, klass))
5522 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array UnsafeLoad/UnsafeStore intrinsics: element access with
 * no bounds check.  IS_SET stores args[2] via emit_array_store (with safety
 * checks off); otherwise loads and returns the element value.
 */
5529 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element type comes from the value parameter for stores, from the return
 * type for loads. */
5534 eklass = mono_class_from_mono_type (fsig->params [2]);
5536 eklass = mono_class_from_mono_type (fsig->ret);
5539 return emit_array_store (cfg, eklass, args, FALSE);
5541 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5542 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Return whether a value of PARAM_KLASS can be reinterpreted as
 * RETURN_KLASS by the UnsafeMov intrinsic: both must be valuetypes without
 * GC references, agree on struct-ness, involve no floating point, and have
 * the same size.
 *
 * Fix: "&param_klass" had been mis-encoded as "¶m_klass" on the two
 * MONO_TYPE_ISSTRUCT lines (the "&para" prefix collapsed into U+00B6 '¶');
 * restored to match the parallel "&return_klass->byval_arg" operands.
 */
5548 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5552 //Only allow for valuetypes
5553 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reinterpreting GC references would hide them from the collector. */
5557 if (param_klass->has_references || return_klass->has_references)
5560 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5561 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5562 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floats live in different registers/stack slots than integers. */
5565 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5566 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5569 //And have the same size
5570 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Emit IR for Array::UnsafeMov, which bitwise-reinterprets a value (or a
 * one-dimensional array of values) of one type as another, provided the two
 * types are layout compatible according to is_unsafe_mov_compatible ().
 */
5576 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5578 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5579 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5581 //Valuetypes that are semantically equivalent
5582 if (is_unsafe_mov_compatible (param_klass, return_klass))
5585 //Arrays of valuetypes that are semantically equivalent
5586 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to emit an intrinsic IR sequence for the constructor CMETHOD.
 * Currently covers SIMD ctors (when MONO_OPT_SIMD is enabled) and falls
 * through to the native-types intrinsics; returns NULL when nothing applies.
 */
5593 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5595 #ifdef MONO_ARCH_SIMD_INTRINSICS
5596 MonoInst *ins = NULL;
5598 if (cfg->opt & MONO_OPT_SIMD) {
5599 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5605 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Emit an OP_MEMORY_BARRIER instruction of the given KIND (one of the
 * MONO_MEMORY_BARRIER_ constants) into the current basic block.
 */
5609 emit_memory_barrier (MonoCompile *cfg, int kind)
5611 MonoInst *ins = NULL;
5612 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5613 MONO_ADD_INS (cfg->cbb, ins);
5614 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsics only supported by the LLVM backend: Math.Sin/Cos/Sqrt
 * and Abs(double), plus branchless integer Min/Max via cmov opcodes.
 * Returns NULL if nothing applies.
 */
5620 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5622 MonoInst *ins = NULL;
5625 /* The LLVM backend supports these intrinsics */
5626 if (cmethod->klass == mono_defaults.math_class) {
5627 if (strcmp (cmethod->name, "Sin") == 0) {
5629 } else if (strcmp (cmethod->name, "Cos") == 0) {
5631 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsic for doubles */
5633 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5637 if (opcode && fsig->param_count == 1) {
5638 MONO_INST_NEW (cfg, ins, opcode);
5639 ins->type = STACK_R8;
5640 ins->dreg = mono_alloc_freg (cfg);
5641 ins->sreg1 = args [0]->dreg;
5642 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max need conditional-move support */
5646 if (cfg->opt & MONO_OPT_CMOV) {
5647 if (strcmp (cmethod->name, "Min") == 0) {
5648 if (fsig->params [0]->type == MONO_TYPE_I4)
5650 if (fsig->params [0]->type == MONO_TYPE_U4)
5651 opcode = OP_IMIN_UN;
5652 else if (fsig->params [0]->type == MONO_TYPE_I8)
5654 else if (fsig->params [0]->type == MONO_TYPE_U8)
5655 opcode = OP_LMIN_UN;
5656 } else if (strcmp (cmethod->name, "Max") == 0) {
5657 if (fsig->params [0]->type == MONO_TYPE_I4)
5659 if (fsig->params [0]->type == MONO_TYPE_U4)
5660 opcode = OP_IMAX_UN;
5661 else if (fsig->params [0]->type == MONO_TYPE_I8)
5663 else if (fsig->params [0]->type == MONO_TYPE_U8)
5664 opcode = OP_LMAX_UN;
5668 if (opcode && fsig->param_count == 2) {
/* NOTE(review): U4 falls into the STACK_I8 branch of this ternary —
 * confirm this is intended for Min/Max(uint, uint). */
5669 MONO_INST_NEW (cfg, ins, opcode);
5670 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5671 ins->dreg = mono_alloc_ireg (cfg);
5672 ins->sreg1 = args [0]->dreg;
5673 ins->sreg2 = args [1]->dreg;
5674 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Emit intrinsics which are safe to use from share-everything (generic
 * sharing) code. Currently only the Array Unsafe* helpers are handled.
 */
5682 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5684 if (cmethod->klass == mono_defaults.array_class) {
5685 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5686 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5687 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5688 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5689 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5690 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (with signature FSIG and argument
 * instructions ARGS) by an inline IR sequence. Returns the resulting
 * instruction, or NULL when the call must be compiled as a normal call.
 * NOTE(review): the param_count checks below appear to include the implicit
 * 'this' argument — confirm against the caller's signature normalization.
 */
5697 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5699 MonoInst *ins = NULL;
/* Lazily cached System.Runtime.CompilerServices.RuntimeHelpers class */
5701 static MonoClass *runtime_helpers_class = NULL;
5702 if (! runtime_helpers_class)
5703 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5704 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* String intrinsics */
5706 if (cmethod->klass == mono_defaults.string_class) {
5707 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5708 int dreg = alloc_ireg (cfg);
5709 int index_reg = alloc_preg (cfg);
5710 int add_reg = alloc_preg (cfg);
5712 #if SIZEOF_REGISTER == 8
5713 /* The array reg is 64 bits but the index reg is only 32 */
5714 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5716 index_reg = args [1]->dreg;
/* get_Chars must still throw on an out-of-range index */
5718 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5720 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* lea folds base + index*2 + offset into one instruction */
5721 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5722 add_reg = ins->dreg;
5723 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: scale the index by 2 (UTF-16 chars) by hand */
5726 int mult_reg = alloc_preg (cfg);
5727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5728 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5729 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5730 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5732 type_from_op (cfg, ins, NULL, NULL);
5734 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5735 int dreg = alloc_ireg (cfg);
5736 /* Decompose later to allow more optimizations */
5737 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5738 ins->type = STACK_I4;
/* The length load can fault on a null string */
5739 ins->flags |= MONO_INST_FAULT;
5740 cfg->cbb->has_array_access = TRUE;
5741 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5744 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5745 int mult_reg = alloc_preg (cfg);
5746 int add_reg = alloc_preg (cfg);
5748 /* The corlib functions check for oob already. */
5749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5750 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5752 return cfg->cbb->last_ins;
/* Object intrinsics */
5755 } else if (cmethod->klass == mono_defaults.object_class) {
5757 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5758 int dreg = alloc_ireg_ref (cfg);
5759 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type, faulting on a null receiver */
5760 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5761 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5762 type_from_op (cfg, ins, NULL, NULL);
5765 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash the object address; only valid with a non-moving GC */
5766 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5767 int dreg = alloc_ireg (cfg);
5768 int t1 = alloc_ireg (cfg);
/* (addr << 3) * 2654435761 — multiplicative hashing constant */
5770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5771 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5772 ins->type = STACK_I4;
/* Object..ctor is empty, so a nop suffices */
5776 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5777 MONO_INST_NEW (cfg, ins, OP_NOP);
5778 MONO_ADD_INS (cfg->cbb, ins);
/* Array intrinsics */
5782 } else if (cmethod->klass == mono_defaults.array_class) {
5783 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5784 return emit_array_generic_access (cfg, fsig, args, FALSE);
5785 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5786 return emit_array_generic_access (cfg, fsig, args, TRUE);
5788 #ifndef MONO_BIG_ARRAYS
5790 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5793 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5794 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5795 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5796 int dreg = alloc_ireg (cfg);
5797 int bounds_reg = alloc_ireg_mp (cfg);
5798 MonoBasicBlock *end_bb, *szarray_bb;
5799 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5801 NEW_BBLOCK (cfg, end_bb);
5802 NEW_BBLOCK (cfg, szarray_bb);
/* Load the bounds pointer; a null bounds means a szarray (vector) */
5804 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5805 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5807 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5808 /* Non-szarray case */
5810 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5811 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5813 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5814 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5815 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5816 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is always 0 */
5819 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5820 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5822 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5823 MONO_START_BB (cfg, end_bb);
/* Merge both paths into a single result register */
5825 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5826 ins->type = STACK_I4;
/* Remaining array intrinsics are all getters — quick first-letter filter */
5832 if (cmethod->name [0] != 'g')
5835 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5836 int dreg = alloc_ireg (cfg);
5837 int vtable_reg = alloc_preg (cfg);
/* The rank is stored in the vtable, so load obj->vtable->rank */
5838 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5839 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5840 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5841 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5842 type_from_op (cfg, ins, NULL, NULL);
5845 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5846 int dreg = alloc_ireg (cfg);
5848 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5849 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5850 type_from_op (cfg, ins, NULL, NULL);
/* RuntimeHelpers intrinsics */
5855 } else if (cmethod->klass == runtime_helpers_class) {
/* Constant-fold the offset of the char data inside MonoString */
5857 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5858 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* Thread intrinsics */
5862 } else if (cmethod->klass == mono_defaults.thread_class) {
5863 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5864 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5865 MONO_ADD_INS (cfg->cbb, ins);
5867 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5868 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: an ordinary load followed by an acquire barrier */
5869 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5871 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
/* Pick the load opcode matching the element type */
5873 if (fsig->params [0]->type == MONO_TYPE_I1)
5874 opcode = OP_LOADI1_MEMBASE;
5875 else if (fsig->params [0]->type == MONO_TYPE_U1)
5876 opcode = OP_LOADU1_MEMBASE;
5877 else if (fsig->params [0]->type == MONO_TYPE_I2)
5878 opcode = OP_LOADI2_MEMBASE;
5879 else if (fsig->params [0]->type == MONO_TYPE_U2)
5880 opcode = OP_LOADU2_MEMBASE;
5881 else if (fsig->params [0]->type == MONO_TYPE_I4)
5882 opcode = OP_LOADI4_MEMBASE;
5883 else if (fsig->params [0]->type == MONO_TYPE_U4)
5884 opcode = OP_LOADU4_MEMBASE;
5885 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5886 opcode = OP_LOADI8_MEMBASE;
5887 else if (fsig->params [0]->type == MONO_TYPE_R4)
5888 opcode = OP_LOADR4_MEMBASE;
5889 else if (fsig->params [0]->type == MONO_TYPE_R8)
5890 opcode = OP_LOADR8_MEMBASE;
5891 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5892 opcode = OP_LOAD_MEMBASE;
5895 MONO_INST_NEW (cfg, ins, opcode);
5896 ins->inst_basereg = args [0]->dreg;
5897 ins->inst_offset = 0;
5898 MONO_ADD_INS (cfg->cbb, ins);
/* Choose destination register class and stack type from the element type */
5900 switch (fsig->params [0]->type) {
5907 ins->dreg = mono_alloc_ireg (cfg);
5908 ins->type = STACK_I4;
5912 ins->dreg = mono_alloc_lreg (cfg);
5913 ins->type = STACK_I8;
/* Native int: width depends on the target register size */
5917 ins->dreg = mono_alloc_ireg (cfg);
5918 #if SIZEOF_REGISTER == 8
5919 ins->type = STACK_I8;
5921 ins->type = STACK_I4;
5926 ins->dreg = mono_alloc_freg (cfg);
5927 ins->type = STACK_R8;
5930 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5931 ins->dreg = mono_alloc_ireg_ref (cfg);
5932 ins->type = STACK_OBJ;
/* 64 bit loads need decomposing on 32 bit platforms */
5936 if (opcode == OP_LOADI8_MEMBASE)
5937 ins = mono_decompose_opcode (cfg, ins, NULL);
/* Acquire semantics: barrier after the load */
5939 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: a release barrier followed by an ordinary store */
5943 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5945 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
/* Pick the store opcode matching the element type */
5947 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5948 opcode = OP_STOREI1_MEMBASE_REG;
5949 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5950 opcode = OP_STOREI2_MEMBASE_REG;
5951 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5952 opcode = OP_STOREI4_MEMBASE_REG;
5953 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5954 opcode = OP_STOREI8_MEMBASE_REG;
5955 else if (fsig->params [0]->type == MONO_TYPE_R4)
5956 opcode = OP_STORER4_MEMBASE_REG;
5957 else if (fsig->params [0]->type == MONO_TYPE_R8)
5958 opcode = OP_STORER8_MEMBASE_REG;
5959 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5960 opcode = OP_STORE_MEMBASE_REG;
/* Release semantics: barrier before the store */
5963 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5965 MONO_INST_NEW (cfg, ins, opcode);
5966 ins->sreg1 = args [1]->dreg;
5967 ins->inst_destbasereg = args [0]->dreg;
5968 ins->inst_offset = 0;
5969 MONO_ADD_INS (cfg->cbb, ins);
/* 64 bit stores need decomposing on 32 bit platforms */
5971 if (opcode == OP_STOREI8_MEMBASE_REG)
5972 ins = mono_decompose_opcode (cfg, ins, NULL);
/* Monitor intrinsics: route Enter/Exit to fastpath trampolines via abs calls */
5977 } else if (cmethod->klass == mono_defaults.monitor_class) {
5978 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5979 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5982 if (COMPILE_LLVM (cfg)) {
5984 * Pass the argument normally, the LLVM backend will handle the
5985 * calling convention problems.
5987 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: pass the object in the fixed register the trampoline expects */
5989 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5990 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5991 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5992 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5995 return (MonoInst*)call;
5996 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Enter (obj, ref bool lockTaken) overload (.NET 4 pattern) */
5997 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
6000 if (COMPILE_LLVM (cfg)) {
6002 * Pass the argument normally, the LLVM backend will handle the
6003 * calling convention problems.
6005 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
6007 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
6008 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6009 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6010 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
6013 return (MonoInst*)call;
6015 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
6018 if (COMPILE_LLVM (cfg)) {
6019 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6021 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6022 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6023 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6024 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6027 return (MonoInst*)call;
/* System.Threading.Interlocked intrinsics */
6030 } else if (cmethod->klass->image == mono_defaults.corlib &&
6031 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6032 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6035 #if SIZEOF_REGISTER == 8
/* Interlocked.Read (ref long): prefer a native seq-cst atomic load */
6036 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6037 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6038 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6039 ins->dreg = mono_alloc_preg (cfg);
6040 ins->sreg1 = args [0]->dreg;
6041 ins->type = STACK_I8;
6042 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6043 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: barrier + plain load + barrier */
6047 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6049 /* 64 bit reads are already atomic */
6050 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6051 load_ins->dreg = mono_alloc_preg (cfg);
6052 load_ins->inst_basereg = args [0]->dreg;
6053 load_ins->inst_offset = 0;
6054 load_ins->type = STACK_I8;
6055 MONO_ADD_INS (cfg->cbb, load_ins);
6057 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of the constant 1 */
6064 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6065 MonoInst *ins_iconst;
6068 if (fsig->params [0]->type == MONO_TYPE_I4) {
6069 opcode = OP_ATOMIC_ADD_I4;
6070 cfg->has_atomic_add_i4 = TRUE;
6072 #if SIZEOF_REGISTER == 8
6073 else if (fsig->params [0]->type == MONO_TYPE_I8)
6074 opcode = OP_ATOMIC_ADD_I8;
6077 if (!mono_arch_opcode_supported (opcode))
6079 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6080 ins_iconst->inst_c0 = 1;
6081 ins_iconst->dreg = mono_alloc_ireg (cfg);
6082 MONO_ADD_INS (cfg->cbb, ins_iconst);
6084 MONO_INST_NEW (cfg, ins, opcode);
6085 ins->dreg = mono_alloc_ireg (cfg);
6086 ins->inst_basereg = args [0]->dreg;
6087 ins->inst_offset = 0;
6088 ins->sreg2 = ins_iconst->dreg;
6089 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6090 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of the constant -1 */
6092 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6093 MonoInst *ins_iconst;
6096 if (fsig->params [0]->type == MONO_TYPE_I4) {
6097 opcode = OP_ATOMIC_ADD_I4;
6098 cfg->has_atomic_add_i4 = TRUE;
6100 #if SIZEOF_REGISTER == 8
6101 else if (fsig->params [0]->type == MONO_TYPE_I8)
6102 opcode = OP_ATOMIC_ADD_I8;
6105 if (!mono_arch_opcode_supported (opcode))
6107 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6108 ins_iconst->inst_c0 = -1;
6109 ins_iconst->dreg = mono_alloc_ireg (cfg);
6110 MONO_ADD_INS (cfg->cbb, ins_iconst);
6112 MONO_INST_NEW (cfg, ins, opcode);
6113 ins->dreg = mono_alloc_ireg (cfg);
6114 ins->inst_basereg = args [0]->dreg;
6115 ins->inst_offset = 0;
6116 ins->sreg2 = ins_iconst->dreg;
6117 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6118 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of a variable amount */
6120 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6123 if (fsig->params [0]->type == MONO_TYPE_I4) {
6124 opcode = OP_ATOMIC_ADD_I4;
6125 cfg->has_atomic_add_i4 = TRUE;
6127 #if SIZEOF_REGISTER == 8
6128 else if (fsig->params [0]->type == MONO_TYPE_I8)
6129 opcode = OP_ATOMIC_ADD_I8;
6132 if (!mono_arch_opcode_supported (opcode))
6134 MONO_INST_NEW (cfg, ins, opcode);
6135 ins->dreg = mono_alloc_ireg (cfg);
6136 ins->inst_basereg = args [0]->dreg;
6137 ins->inst_offset = 0;
6138 ins->sreg2 = args [1]->dreg;
6139 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6140 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Exchange: atomic swap; floats are routed through int registers
 * via bitwise F<->I moves since the atomic opcodes operate on integers. */
6143 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6144 MonoInst *f2i = NULL, *i2f;
6145 guint32 opcode, f2i_opcode, i2f_opcode;
6146 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6147 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6149 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6150 fsig->params [0]->type == MONO_TYPE_R4) {
6151 opcode = OP_ATOMIC_EXCHANGE_I4;
6152 f2i_opcode = OP_MOVE_F_TO_I4;
6153 i2f_opcode = OP_MOVE_I4_TO_F;
6154 cfg->has_atomic_exchange_i4 = TRUE;
6156 #if SIZEOF_REGISTER == 8
6158 fsig->params [0]->type == MONO_TYPE_I8 ||
6159 fsig->params [0]->type == MONO_TYPE_R8 ||
6160 fsig->params [0]->type == MONO_TYPE_I) {
6161 opcode = OP_ATOMIC_EXCHANGE_I8;
6162 f2i_opcode = OP_MOVE_F_TO_I8;
6163 i2f_opcode = OP_MOVE_I8_TO_F;
/* 32 bit target: refs and native ints use the I4 variant */
6166 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6167 opcode = OP_ATOMIC_EXCHANGE_I4;
6168 cfg->has_atomic_exchange_i4 = TRUE;
6174 if (!mono_arch_opcode_supported (opcode))
6178 /* TODO: Decompose these opcodes instead of bailing here. */
6179 if (COMPILE_SOFT_FLOAT (cfg))
/* Bitcast the float argument into an int register */
6182 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6183 f2i->dreg = mono_alloc_ireg (cfg);
6184 f2i->sreg1 = args [1]->dreg;
6185 if (f2i_opcode == OP_MOVE_F_TO_I4)
6186 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6187 MONO_ADD_INS (cfg->cbb, f2i);
6190 MONO_INST_NEW (cfg, ins, opcode);
6191 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6192 ins->inst_basereg = args [0]->dreg;
6193 ins->inst_offset = 0;
6194 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6195 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type of the old value returned by the exchange */
6197 switch (fsig->params [0]->type) {
6199 ins->type = STACK_I4;
6202 ins->type = STACK_I8;
6205 #if SIZEOF_REGISTER == 8
6206 ins->type = STACK_I8;
6208 ins->type = STACK_I4;
6213 ins->type = STACK_R8;
6216 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6217 ins->type = STACK_OBJ;
/* Bitcast the int result back to a float register */
6222 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6223 i2f->dreg = mono_alloc_freg (cfg);
6224 i2f->sreg1 = ins->dreg;
6225 i2f->type = STACK_R8;
6226 if (i2f_opcode == OP_MOVE_I4_TO_F)
6227 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6228 MONO_ADD_INS (cfg->cbb, i2f);
/* Storing a reference needs a GC write barrier */
6233 if (cfg->gen_write_barriers && is_ref)
6234 emit_write_barrier (cfg, args [0], args [1]);
/* Interlocked.CompareExchange (location, value, comparand): atomic CAS.
 * Note the element type is params [1] here (params [0] is the ref location). */
6236 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6237 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6238 guint32 opcode, f2i_opcode, i2f_opcode;
6239 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6240 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6242 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6243 fsig->params [1]->type == MONO_TYPE_R4) {
6244 opcode = OP_ATOMIC_CAS_I4;
6245 f2i_opcode = OP_MOVE_F_TO_I4;
6246 i2f_opcode = OP_MOVE_I4_TO_F;
6247 cfg->has_atomic_cas_i4 = TRUE;
6249 #if SIZEOF_REGISTER == 8
6251 fsig->params [1]->type == MONO_TYPE_I8 ||
6252 fsig->params [1]->type == MONO_TYPE_R8 ||
6253 fsig->params [1]->type == MONO_TYPE_I) {
6254 opcode = OP_ATOMIC_CAS_I8;
6255 f2i_opcode = OP_MOVE_F_TO_I8;
6256 i2f_opcode = OP_MOVE_I8_TO_F;
/* 32 bit target: refs and native ints use the I4 variant */
6259 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6260 opcode = OP_ATOMIC_CAS_I4;
6261 cfg->has_atomic_cas_i4 = TRUE;
6267 if (!mono_arch_opcode_supported (opcode))
6271 /* TODO: Decompose these opcodes instead of bailing here. */
6272 if (COMPILE_SOFT_FLOAT (cfg))
/* Bitcast the new value and the comparand into int registers */
6275 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6276 f2i_new->dreg = mono_alloc_ireg (cfg);
6277 f2i_new->sreg1 = args [1]->dreg;
6278 if (f2i_opcode == OP_MOVE_F_TO_I4)
6279 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6280 MONO_ADD_INS (cfg->cbb, f2i_new);
6282 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6283 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6284 f2i_cmp->sreg1 = args [2]->dreg;
6285 if (f2i_opcode == OP_MOVE_F_TO_I4)
6286 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6287 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6290 MONO_INST_NEW (cfg, ins, opcode);
6291 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6292 ins->sreg1 = args [0]->dreg;
6293 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6294 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6295 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type of the old value returned by the CAS */
6297 switch (fsig->params [1]->type) {
6299 ins->type = STACK_I4;
6302 ins->type = STACK_I8;
6305 #if SIZEOF_REGISTER == 8
6306 ins->type = STACK_I8;
6308 ins->type = STACK_I4;
6313 ins->type = STACK_R8;
6316 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6317 ins->type = STACK_OBJ;
/* Bitcast the int result back to a float register */
6322 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6323 i2f->dreg = mono_alloc_freg (cfg);
6324 i2f->sreg1 = ins->dreg;
6325 i2f->type = STACK_R8;
6326 if (i2f_opcode == OP_MOVE_I4_TO_F)
6327 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6328 MONO_ADD_INS (cfg->cbb, i2f);
/* Storing a reference needs a GC write barrier */
6333 if (cfg->gen_write_barriers && is_ref)
6334 emit_write_barrier (cfg, args [0], args [1]);
/* 4-argument CompareExchange (int32) variant: also reports success through
 * an out bool (args [3]). */
6336 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6337 fsig->params [1]->type == MONO_TYPE_I4) {
6338 MonoInst *cmp, *ceq;
6340 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6343 /* int32 r = CAS (location, value, comparand); */
6344 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6345 ins->dreg = alloc_ireg (cfg);
6346 ins->sreg1 = args [0]->dreg;
6347 ins->sreg2 = args [1]->dreg;
6348 ins->sreg3 = args [2]->dreg;
6349 ins->type = STACK_I4;
6350 MONO_ADD_INS (cfg->cbb, ins);
6352 /* bool result = r == comparand; */
6353 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6354 cmp->sreg1 = ins->dreg;
6355 cmp->sreg2 = args [2]->dreg;
6356 cmp->type = STACK_I4;
6357 MONO_ADD_INS (cfg->cbb, cmp);
6359 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6360 ceq->dreg = alloc_ireg (cfg);
6361 ceq->type = STACK_I4;
6362 MONO_ADD_INS (cfg->cbb, ceq);
6364 /* *success = result; */
6365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6367 cfg->has_atomic_cas_i4 = TRUE;
/* Interlocked.MemoryBarrier is a full (sequentially consistent) fence */
6369 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6370 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* System.Threading.Volatile intrinsics: native acquire/release atomics */
6374 } else if (cmethod->klass->image == mono_defaults.corlib &&
6375 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6376 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
/* Volatile.Read: an acquire atomic load of the proper width */
6379 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6381 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6382 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6384 if (fsig->params [0]->type == MONO_TYPE_I1)
6385 opcode = OP_ATOMIC_LOAD_I1;
6386 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6387 opcode = OP_ATOMIC_LOAD_U1;
6388 else if (fsig->params [0]->type == MONO_TYPE_I2)
6389 opcode = OP_ATOMIC_LOAD_I2;
6390 else if (fsig->params [0]->type == MONO_TYPE_U2)
6391 opcode = OP_ATOMIC_LOAD_U2;
6392 else if (fsig->params [0]->type == MONO_TYPE_I4)
6393 opcode = OP_ATOMIC_LOAD_I4;
6394 else if (fsig->params [0]->type == MONO_TYPE_U4)
6395 opcode = OP_ATOMIC_LOAD_U4;
6396 else if (fsig->params [0]->type == MONO_TYPE_R4)
6397 opcode = OP_ATOMIC_LOAD_R4;
6398 else if (fsig->params [0]->type == MONO_TYPE_R8)
6399 opcode = OP_ATOMIC_LOAD_R8;
6400 #if SIZEOF_REGISTER == 8
6401 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6402 opcode = OP_ATOMIC_LOAD_I8;
6403 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6404 opcode = OP_ATOMIC_LOAD_U8;
/* 32 bit target: native ints and refs use the 4-byte variants */
6406 else if (fsig->params [0]->type == MONO_TYPE_I)
6407 opcode = OP_ATOMIC_LOAD_I4;
6408 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6409 opcode = OP_ATOMIC_LOAD_U4;
6413 if (!mono_arch_opcode_supported (opcode))
6416 MONO_INST_NEW (cfg, ins, opcode);
6417 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6418 ins->sreg1 = args [0]->dreg;
6419 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6420 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type of the loaded value */
6422 switch (fsig->params [0]->type) {
6423 case MONO_TYPE_BOOLEAN:
6430 ins->type = STACK_I4;
6434 ins->type = STACK_I8;
6438 #if SIZEOF_REGISTER == 8
6439 ins->type = STACK_I8;
6441 ins->type = STACK_I4;
6446 ins->type = STACK_R8;
6449 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6450 ins->type = STACK_OBJ;
/* Volatile.Write: a release atomic store of the proper width */
6456 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6458 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6460 if (fsig->params [0]->type == MONO_TYPE_I1)
6461 opcode = OP_ATOMIC_STORE_I1;
6462 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6463 opcode = OP_ATOMIC_STORE_U1;
6464 else if (fsig->params [0]->type == MONO_TYPE_I2)
6465 opcode = OP_ATOMIC_STORE_I2;
6466 else if (fsig->params [0]->type == MONO_TYPE_U2)
6467 opcode = OP_ATOMIC_STORE_U2;
6468 else if (fsig->params [0]->type == MONO_TYPE_I4)
6469 opcode = OP_ATOMIC_STORE_I4;
6470 else if (fsig->params [0]->type == MONO_TYPE_U4)
6471 opcode = OP_ATOMIC_STORE_U4;
6472 else if (fsig->params [0]->type == MONO_TYPE_R4)
6473 opcode = OP_ATOMIC_STORE_R4;
6474 else if (fsig->params [0]->type == MONO_TYPE_R8)
6475 opcode = OP_ATOMIC_STORE_R8;
6476 #if SIZEOF_REGISTER == 8
6477 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6478 opcode = OP_ATOMIC_STORE_I8;
6479 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6480 opcode = OP_ATOMIC_STORE_U8;
/* 32 bit target: native ints and refs use the 4-byte variants */
6482 else if (fsig->params [0]->type == MONO_TYPE_I)
6483 opcode = OP_ATOMIC_STORE_I4;
6484 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6485 opcode = OP_ATOMIC_STORE_U4;
6489 if (!mono_arch_opcode_supported (opcode))
6492 MONO_INST_NEW (cfg, ins, opcode);
6493 ins->dreg = args [0]->dreg;
6494 ins->sreg1 = args [1]->dreg;
6495 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6496 MONO_ADD_INS (cfg->cbb, ins);
/* Storing a reference needs a GC write barrier */
6498 if (cfg->gen_write_barriers && is_ref)
6499 emit_write_barrier (cfg, args [0], args [1]);
/* Debugger intrinsics */
6505 } else if (cmethod->klass->image == mono_defaults.corlib &&
6506 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6507 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6508 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* NB: 'brekpoint' is the (historically misspelled) helper name */
6509 if (should_insert_brekpoint (cfg->method)) {
6510 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6512 MONO_INST_NEW (cfg, ins, OP_NOP);
6513 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows: a JIT-time constant */
6517 } else if (cmethod->klass->image == mono_defaults.corlib &&
6518 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6519 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6520 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6522 EMIT_NEW_ICONST (cfg, ins, 1);
6524 EMIT_NEW_ICONST (cfg, ins, 0);
6527 } else if (cmethod->klass == mono_defaults.math_class) {
6529 * There is general branchless code for Min/Max, but it does not work for
6531 * http://everything2.com/?node_id=1051618
/* ObjCRuntime.Selector.GetHandle intrinsic (MonoMac/monotouch): fold a
 * selector lookup on a literal string into an OP_OBJC_GET_SELECTOR.
 * FIX: the GetHandle check compared cmethod->klass->name — which the
 * enclosing condition already pinned to "Selector" — so the intrinsic could
 * never fire; it must compare the method name, cmethod->name. */
6533 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6534 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6535 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6536 !strcmp (cmethod->klass->name, "Selector")) {
6537 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6538 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6539 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6542 MonoJumpInfoToken *ji;
/* The selector instruction is not supported by the LLVM backend */
6545 cfg->disable_llvm = TRUE;
/* Recover the ldstr token from the GOT entry / AOT constant */
6547 if (args [0]->opcode == OP_GOT_ENTRY) {
6548 pi = args [0]->inst_p1;
6549 g_assert (pi->opcode == OP_PATCH_INFO);
6550 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6553 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6554 ji = args [0]->inst_p0;
/* The original string load is no longer needed */
6557 NULLIFY_INS (args [0]);
6560 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6561 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6562 ins->dreg = mono_alloc_ireg (cfg);
6564 ins->inst_p0 = mono_string_to_utf8 (s);
6565 MONO_ADD_INS (cfg->cbb, ins);
/* No class-specific intrinsic matched: try SIMD, native-types, LLVM and
 * per-architecture intrinsics, in that order */
6571 #ifdef MONO_ARCH_SIMD_INTRINSICS
6572 if (cfg->opt & MONO_OPT_SIMD) {
6573 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6579 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6583 if (COMPILE_LLVM (cfg)) {
6584 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6589 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6593 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Intercept calls to selected runtime-internal methods and emit a
 *   JIT-generated replacement instead. The only redirection visible here is
 *   String.InternalAllocateStr -> the GC's managed allocator, taken only
 *   when allocation profiling and MONO_OPT_SHARED are both off.
 *   Returns the call instruction of the replacement, or (presumably, the
 *   fall-through return is elided here) NULL when no redirection applies.
 */
6596 inline static MonoInst*
6597 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6598 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6600 if (method->klass == mono_defaults.string_class) {
6601 /* managed string allocation support */
6602 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6603 MonoInst *iargs [2];
6604 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6605 MonoMethod *managed_alloc = NULL;
6607 g_assert (vtable); /* Should not fail since it is System.String */
6608 #ifndef MONO_CROSS_COMPILE
6609 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, requested-length) as arguments */
6613 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6614 iargs [1] = args [0];
6615 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   For inlining: create an OP_LOCAL variable for each argument (including
 *   the implicit 'this' when sig->hasthis) and store the caller-provided
 *   stack values SP into them, so the inlined body can address its
 *   arguments like ordinary variables.
 */
6622 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6624 MonoInst *store, *temp;
6627 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when present; its static type comes from the stack entry */
6628 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6631 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6632 * would be different than the MonoInst's used to represent arguments, and
6633 * the ldelema implementation can't deal with that.
6634 * Solution: When ldelema is used on an inline argument, create a var for
6635 * it, emit ldelema on that var, and emit the saving code below in
6636 * inline_method () if needed.
6638 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6639 cfg->args [i] = temp;
6640 /* This uses cfg->args [i] which is set by the preceding line */
6641 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6642 store->cil_code = sp [0]->cil_code;
6647 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6648 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6650 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug aid: only allow inlining of callees whose full name starts with
 *   the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 *   The env value is cached in a function-local static on first use.
 */
6652 check_inline_called_method_name_limit (MonoMethod *called_method)
6655 static const char *limit = NULL;
6657 if (limit == NULL) {
6658 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6660 if (limit_string != NULL)
6661 limit = limit_string;
/* Empty limit means "no restriction" (handled in the elided else branch) */
6666 if (limit [0] != '\0') {
6667 char *called_method_name = mono_method_full_name (called_method, TRUE);
6669 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6670 g_free (called_method_name);
6672 //return (strncmp_result <= 0);
6673 return (strncmp_result == 0);
6680 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug aid: mirror of check_inline_called_method_name_limit, but the
 *   prefix filter (MONO_INLINE_CALLER_METHOD_NAME_LIMIT) is applied to the
 *   *caller* method's full name.
 */
6682 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6685 static const char *limit = NULL;
6687 if (limit == NULL) {
6688 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6689 if (limit_string != NULL) {
6690 limit = limit_string;
6696 if (limit [0] != '\0') {
6697 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6699 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6700 g_free (caller_method_name);
6702 //return (strncmp_result <= 0);
6703 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes register DREG to the zero/default value for
 *   type RTYPE: NULL for references/pointers, 0 for integers, 0.0 for
 *   floats (R4 kept as real R4 only when cfg->r4fp), and VZERO for value
 *   types (including generic instances and gshared type vars known to be
 *   valuetypes).
 */
6711 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Statics so OP_R4CONST/OP_R8CONST can point at stable storage */
6713 static double r8_0 = 0.0;
6714 static float r4_0 = 0.0;
6718 rtype = mini_get_underlying_type (cfg, rtype);
6722 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6723 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6724 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6725 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6726 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6727 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6728 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6729 ins->type = STACK_R4;
6730 ins->inst_p0 = (void*)&r4_0;
6732 MONO_ADD_INS (cfg->cbb, ins);
6733 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6734 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6735 ins->type = STACK_R8;
6736 ins->inst_p0 = (void*)&r8_0;
6738 MONO_ADD_INS (cfg->cbb, ins);
6739 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6740 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6741 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6742 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6743 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: anything else is treated as a pointer-sized NULL */
6745 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Same type dispatch as emit_init_rvar, but emit OP_DUMMY_* placeholder
 *   initializations instead of real stores — used to keep the IR/SSA valid
 *   when the actual initialization is not wanted. Falls back to
 *   emit_init_rvar for types with no dummy opcode.
 */
6750 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6754 rtype = mini_get_underlying_type (cfg, rtype);
6758 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6759 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6760 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6761 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6762 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6763 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6764 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6765 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6766 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6767 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6768 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6769 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6770 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6771 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6773 emit_init_rvar (cfg, dreg, rtype);
6777 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE. Under soft-float the
 *   value is materialized into a fresh dreg and then stored to the local;
 *   otherwise the local's own dreg is initialized directly (real init or
 *   dummy init depending on INIT).
 */
6779 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6781 MonoInst *var = cfg->locals [local];
6782 if (COMPILE_SOFT_FLOAT (cfg)) {
6784 int reg = alloc_dreg (cfg, var->type);
6785 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local */
6786 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6789 emit_init_rvar (cfg, var->dreg, type);
6791 emit_dummy_init_rvar (cfg, var->dreg, type);
6798 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Try to inline CMETHOD at the current point. Saves the per-method
 *   compilation state in cfg, compiles CMETHOD's IL into fresh bblocks via
 *   mono_method_to_ir (), restores the state, and either links/merges the
 *   new bblocks in (success) or discards them (abort). On success *out_cbb
 *   receives the bblock compilation continues in, and the non-negative
 *   inline cost is returned; on failure/abort the new bblocks are dropped.
 */
6801 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6802 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6804 MonoInst *ins, *rvar = NULL;
6805 MonoMethodHeader *cheader;
6806 MonoBasicBlock *ebblock, *sbblock;
/* prev_* variables snapshot the cfg state that mono_method_to_ir () clobbers */
6808 MonoMethod *prev_inlined_method;
6809 MonoInst **prev_locals, **prev_args;
6810 MonoType **prev_arg_types;
6811 guint prev_real_offset;
6812 GHashTable *prev_cbb_hash;
6813 MonoBasicBlock **prev_cil_offset_to_bb;
6814 MonoBasicBlock *prev_cbb;
6815 unsigned char* prev_cil_start;
6816 guint32 prev_cil_offset_to_bb_len;
6817 MonoMethod *prev_current_method;
6818 MonoGenericContext *prev_generic_context;
6819 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6821 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters (debug aids) */
6823 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6824 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6827 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6828 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6833 fsig = mono_method_signature (cmethod);
6835 if (cfg->verbose_level > 2)
6836 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6838 if (!cmethod->inline_info) {
6839 cfg->stat_inlineable_methods++;
6840 cmethod->inline_info = 1;
6843 /* allocate local variables */
6844 cheader = mono_method_get_header (cmethod);
6846 if (cheader == NULL || mono_loader_get_last_error ()) {
6847 MonoLoaderError *error = mono_loader_get_last_error ();
6850 mono_metadata_free_mh (cheader);
6851 if (inline_always && error)
6852 mono_cfg_set_exception (cfg, error->exception_type);
6854 mono_loader_clear_error ();
6858 /*Must verify before creating locals as it can cause the JIT to assert.*/
6859 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6860 mono_metadata_free_mh (cheader);
6864 /* allocate space to store the return value */
6865 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6866 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6869 prev_locals = cfg->locals;
6870 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6871 for (i = 0; i < cheader->num_locals; ++i)
6872 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6874 /* allocate start and end blocks */
6875 /* This is needed so if the inline is aborted, we can clean up */
6876 NEW_BBLOCK (cfg, sbblock);
6877 sbblock->real_offset = real_offset;
6879 NEW_BBLOCK (cfg, ebblock);
6880 ebblock->block_num = cfg->num_bblocks++;
6881 ebblock->real_offset = real_offset;
/* Save the caller's compilation state before recursing into the callee */
6883 prev_args = cfg->args;
6884 prev_arg_types = cfg->arg_types;
6885 prev_inlined_method = cfg->inlined_method;
6886 cfg->inlined_method = cmethod;
6887 cfg->ret_var_set = FALSE;
6888 cfg->inline_depth ++;
6889 prev_real_offset = cfg->real_offset;
6890 prev_cbb_hash = cfg->cbb_hash;
6891 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6892 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6893 prev_cil_start = cfg->cil_start;
6894 prev_cbb = cfg->cbb;
6895 prev_current_method = cfg->current_method;
6896 prev_generic_context = cfg->generic_context;
6897 prev_ret_var_set = cfg->ret_var_set;
6898 prev_disable_inline = cfg->disable_inline;
6900 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; negative cost means inlining failed */
6903 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6905 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of success */
6907 cfg->inlined_method = prev_inlined_method;
6908 cfg->real_offset = prev_real_offset;
6909 cfg->cbb_hash = prev_cbb_hash;
6910 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6911 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6912 cfg->cil_start = prev_cil_start;
6913 cfg->locals = prev_locals;
6914 cfg->args = prev_args;
6915 cfg->arg_types = prev_arg_types;
6916 cfg->current_method = prev_current_method;
6917 cfg->generic_context = prev_generic_context;
6918 cfg->ret_var_set = prev_ret_var_set;
6919 cfg->disable_inline = prev_disable_inline;
6920 cfg->inline_depth --;
/* Accept the inline when cheap enough, or when the caller insists */
6922 if ((costs >= 0 && costs < 60) || inline_always) {
6923 if (cfg->verbose_level > 2)
6924 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6926 cfg->stat_inlined_methods++;
6928 /* always add some code to avoid block split failures */
6929 MONO_INST_NEW (cfg, ins, OP_NOP);
6930 MONO_ADD_INS (prev_cbb, ins);
6932 prev_cbb->next_bb = sbblock;
6933 link_bblock (cfg, prev_cbb, sbblock);
6936 * Get rid of the begin and end bblocks if possible to aid local
6939 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6941 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6942 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6944 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6945 MonoBasicBlock *prev = ebblock->in_bb [0];
6946 mono_merge_basic_blocks (cfg, prev, ebblock);
6948 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6949 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6950 cfg->cbb = prev_cbb;
6954 * It's possible that the rvar is set in some prev bblock, but not in others.
6960 for (i = 0; i < ebblock->in_count; ++i) {
6961 bb = ebblock->in_bb [i];
6963 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6966 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6975 *out_cbb = cfg->cbb;
6979 * If the inlined method contains only a throw, then the ret var is not
6980 * set, so set it to a dummy value.
6983 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6985 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6988 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear the failure state and discard the new bblocks */
6991 if (cfg->verbose_level > 2)
6992 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6993 cfg->exception_type = MONO_EXCEPTION_NONE;
6994 mono_loader_clear_error ();
6996 /* This gets rid of the newly added bblocks */
6997 cfg->cbb = prev_cbb;
6999 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7004 * Some of these comments may well be out-of-date.
7005 * Design decisions: we do a single pass over the IL code (and we do bblock
7006 * splitting/merging in the few cases when it's required: a back jump to an IL
7007 * address that was not already seen as bblock starting point).
7008 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7009 * Complex operations are decomposed in simpler ones right away. We need to let the
7010 * arch-specific code peek and poke inside this process somehow (except when the
7011 * optimizations can take advantage of the full semantic info of coarse opcodes).
7012 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7013 * MonoInst->opcode initially is the IL opcode or some simplification of that
7014 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7015 * opcode with value bigger than OP_LAST.
7016 * At this point the IR can be handed over to an interpreter, a dumb code generator
7017 * or to the optimizing code generator that will translate it to SSA form.
7019 * Profiling directed optimizations.
7020 * We may compile by default with few or no optimizations and instrument the code
7021 * or the user may indicate what methods to optimize the most either in a config file
7022 * or through repeated runs where the compiler applies offline the optimizations to
7023 * each method and then decides if it was worth it.
7026 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7027 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7028 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7029 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7030 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7031 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7032 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7033 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7035 /* offset from br.s -> br like opcodes */
7036 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the IL address IP falls inside bblock BB, i.e. no other
 *   bblock starts at that offset (NULL entry) or the starting bblock is BB
 *   itself.
 */
7039 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7041 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7043 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode, and create
 *   bblocks (via GET_BBLOCK) at every branch target and at the instruction
 *   following each branch/switch. Also marks the bblock containing a
 *   CEE_THROW as out-of-line so it can be moved off the hot path.
 */
7047 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7049 unsigned char *ip = start;
7050 unsigned char *target;
7053 MonoBasicBlock *bblock;
7054 const MonoOpcode *opcode;
7057 cli_addr = ip - start;
7058 i = mono_opcode_value ((const guint8 **)&ip, end);
7061 opcode = &mono_opcodes [i];
/* Advance by the operand size; only branch-like operands create bblocks */
7062 switch (opcode->argument) {
7063 case MonoInlineNone:
7066 case MonoInlineString:
7067 case MonoInlineType:
7068 case MonoInlineField:
7069 case MonoInlineMethod:
7072 case MonoShortInlineR:
7079 case MonoShortInlineVar:
7080 case MonoShortInlineI:
7083 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
7084 target = start + cli_addr + 2 + (signed char)ip [1];
7085 GET_BBLOCK (cfg, bblock, target);
7088 GET_BBLOCK (cfg, bblock, ip);
7090 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction */
7091 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7092 GET_BBLOCK (cfg, bblock, target);
7095 GET_BBLOCK (cfg, bblock, ip);
7097 case MonoInlineSwitch: {
7098 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table */
7101 cli_addr += 5 + 4 * n;
7102 target = start + cli_addr;
7103 GET_BBLOCK (cfg, bblock, target);
7105 for (j = 0; j < n; ++j) {
7106 target = start + cli_addr + (gint32)read32 (ip);
7107 GET_BBLOCK (cfg, bblock, target);
7117 g_assert_not_reached ();
7120 if (i == CEE_THROW) {
7121 unsigned char *bb_start = ip - 1;
7123 /* Find the start of the bblock containing the throw */
7125 while ((bb_start >= start) && !bblock) {
7126 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7130 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 *   methods the token indexes the wrapper's own data (then inflated with
 *   CONTEXT when generic); otherwise it is looked up in M's image. "Open"
 *   constructed methods are allowed here — mini_get_method () filters them.
 */
7140 static inline MonoMethod *
7141 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7145 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7146 method = mono_method_get_wrapper_data (m, token);
7149 method = mono_class_inflate_generic_method_checked (method, context, &error);
7150 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7153 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, reject methods whose class is still an open constructed type
 *   (the elided branch presumably clears/overrides the result — confirm).
 */
7159 static inline MonoMethod *
7160 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7162 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7164 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in METHOD's context: wrapper data for
 *   wrapper methods (inflated with CONTEXT), otherwise a typespec lookup in
 *   the method's image. The class is initialized before returning.
 */
7170 static inline MonoClass*
7171 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7176 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7177 klass = mono_method_get_wrapper_data (method, token);
7179 klass = mono_class_inflate_generic_class (klass, context);
7181 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7182 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7185 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature: for wrappers, fetch the stored
 *   signature (inflating with CONTEXT when generic); otherwise parse it
 *   from the method's image metadata.
 */
7189 static inline MonoMethodSignature*
7190 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7192 MonoMethodSignature *fsig;
7194 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7197 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7199 fsig = mono_inflate_generic_signature (fsig, context, &error);
7201 g_assert (mono_error_ok (&error));
7204 fsig = mono_metadata_parse_signature (method->klass->image, token);
7210 * Returns TRUE if the JIT should abort inlining because "callee"
7211 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   CAS LinkDemand check for a CALLER -> CALLEE call. On an ECMA-level
 *   failure, emits code that throws a SecurityException at the call site;
 *   other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 *   (without overwriting a previously recorded exception).
 */
7214 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7218 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7222 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7223 if (result == MONO_JIT_SECURITY_OK)
7226 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7227 /* Generate code to throw a SecurityException before the actual call/link */
7228 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7231 NEW_ICONST (cfg, args [0], 4);
7232 NEW_METHODCONST (cfg, args [1], caller);
7233 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7234 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7235 /* don't hide previous results */
7236 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7237 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving, cached in a static) the
 *   SecurityManager.ThrowException(exception) helper method.
 */
7245 throw_exception (void)
7247 static MonoMethod *method = NULL;
7250 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7251 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current point,
 *   i.e. code that throws the given (pre-created) exception at runtime.
 */
7258 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7260 MonoMethod *thrower = throw_exception ();
7263 EMIT_NEW_PCONST (cfg, args [0], ex);
7264 mono_emit_method_call (cfg, thrower, args, NULL);
7268 * Return the original method if a wrapper is specified. We can only access
7269 * the custom attributes from the original method.
7272 get_original_method (MonoMethod *method)
7274 if (method->wrapper_type == MONO_WRAPPER_NONE)
7277 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7278 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7281 /* in other cases we need to find the original method */
7282 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security: if CALLER (unwrapped first — wrappers carry no
 *   attributes) may not access FIELD, emit code throwing the resulting
 *   security exception.
 */
7286 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7287 MonoBasicBlock *bblock, unsigned char *ip)
7289 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7290 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7292 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: same as the field check above, but for CALLER
 *   invoking CALLEE.
 */
7296 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7297 MonoBasicBlock *bblock, unsigned char *ip)
7299 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7300 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7302 emit_throw_exception (cfg, ex);
7306 * Check that the IL instructions at ip are the array initialization
7307 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray" IL sequence following a newarr, and if
 *   it matches, return the RVA/data pointer of the field's static blob so
 *   the array can be filled directly. OUT_SIZE/OUT_FIELD_TOKEN receive the
 *   element size and field token. Only little-endian-safe element types are
 *   accepted (byte-order of the blob is fixed).
 */
7310 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7313 * newarr[System.Int32]
7315 * ldtoken field valuetype ...
7316 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7318 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7320 guint32 token = read32 (ip + 7);
7321 guint32 field_token = read32 (ip + 2);
7322 guint32 field_index = field_token & 0xffffff;
7324 const char *data_ptr;
7326 MonoMethod *cmethod;
7327 MonoClass *dummy_class;
7328 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7332 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7336 *out_field_token = field_token;
7338 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The call target must really be corlib's RuntimeHelpers.InitializeArray */
7341 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7343 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7344 case MONO_TYPE_BOOLEAN:
7348 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7349 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7350 case MONO_TYPE_CHAR:
/* The blob must be at least as large as the array data we will copy */
7367 if (size > mono_type_size (field->type, &dummy_align))
7370 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7371 if (!image_is_dynamic (method->klass->image)) {
7372 field_index = read32 (ip + 2) & 0xffffff;
7373 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7374 data_ptr = mono_image_rva_map (method->klass->image, rva);
7375 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7376 /* for aot code we do the lookup on load */
7377 if (aot && data_ptr)
7378 return GUINT_TO_POINTER (rva);
7380 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7382 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg, with a message naming the
 *   method and disassembling the offending IL instruction (or noting an
 *   empty body). The header is queued for later freeing.
 */
7390 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7392 char *method_fname = mono_method_full_name (method, TRUE);
7394 MonoMethodHeader *header = mono_method_get_header (method);
7396 if (header->code_size == 0)
7397 method_code = g_strdup ("method body is empty.");
7399 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7401 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7402 g_free (method_fname);
7403 g_free (method_code);
7404 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on the cfg; the pointer is GC-rooted
 *   since it references a managed object.
 */
7408 set_exception_object (MonoCompile *cfg, MonoException *exception)
7410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7411 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7412 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of *SP into local N. When the value on top of the stack is
 *   the immediately preceding ICONST/I8CONST, retarget its dreg to the
 *   local instead of emitting a reg-reg move.
 */
7416 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7419 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7420 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7421 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7422 /* Optimize reg-reg moves away */
7424 * Can't optimize other opcodes, since sp[0] might point to
7425 * the last ins of a decomposed opcode.
7427 sp [0]->dreg = (cfg)->locals [n]->dreg;
7429 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7434 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   When a ldloca is immediately followed by initobj on the same address,
 *   replace the pair with a direct initialization of the local, returning
 *   the new IP past the consumed instructions (fall-through return for the
 *   non-matching case is elided here).
 */
7437 static inline unsigned char *
7438 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7448 local = read16 (ip + 2);
7452 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7453 /* From the INITOBJ case */
7454 token = read32 (ip + 2);
7455 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7456 CHECK_TYPELOAD (klass);
7457 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7458 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk the inheritance chain and return whether CLASS derives from (or
 *   is) System.Exception.
 */
7466 is_exception_class (MonoClass *class)
7469 if (class == mono_defaults.exception_class)
7471 class = class->parent;
7477 * is_jit_optimizer_disabled:
7479 * Determine whether M's assembly has a DebuggableAttribute with the
7480 * IsJITOptimizerDisabled flag set.
7483 is_jit_optimizer_disabled (MonoMethod *m)
7485 MonoAssembly *ass = m->klass->image->assembly;
7486 MonoCustomAttrInfo* attrs;
7487 static MonoClass *klass;
7489 gboolean val = FALSE;
/* Result is cached per-assembly; memory barriers order value before flag */
7492 if (ass->jit_optimizer_disabled_inited)
7493 return ass->jit_optimizer_disabled;
7496 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7499 ass->jit_optimizer_disabled = FALSE;
7500 mono_memory_barrier ();
7501 ass->jit_optimizer_disabled_inited = TRUE;
7505 attrs = mono_custom_attrs_from_assembly (ass);
7507 for (i = 0; i < attrs->num_attrs; ++i) {
7508 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7510 MonoMethodSignature *sig;
7512 if (!attr->ctor || attr->ctor->klass != klass)
7514 /* Decode the attribute. See reflection.c */
7515 p = (const char*)attr->data;
7516 g_assert (read16 (p) == 0x0001);
7519 // FIXME: Support named parameters
7520 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor form is handled */
7521 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7523 /* Two boolean arguments */
7527 mono_custom_attrs_free (attrs);
7530 ass->jit_optimizer_disabled = val;
7531 mono_memory_barrier ();
7532 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a CIL tail. call from METHOD to CMETHOD can really be
 *   compiled as a tail call: start from the architecture's verdict, then
 *   veto anything that could leave pointers into the current frame (byref/
 *   pointer/fnptr args, valuetype 'this'), pinvokes, save_lmf methods,
 *   non-dynamic-method wrappers, and any opcode other than plain CEE_CALL.
 */
7538 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7540 gboolean supported_tail_call;
7543 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7544 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7546 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7549 for (i = 0; i < fsig->param_count; ++i) {
7550 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7551 /* These can point to the current method's stack */
7552 supported_tail_call = FALSE;
7554 if (fsig->hasthis && cmethod->klass->valuetype)
7555 /* this might point to the current method's stack */
7556 supported_tail_call = FALSE;
7557 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7558 supported_tail_call = FALSE;
7559 if (cfg->method->save_lmf)
7560 supported_tail_call = FALSE;
7561 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7562 supported_tail_call = FALSE;
7563 if (call_opcode != CEE_CALL)
7564 supported_tail_call = FALSE;
7566 /* Debugging support */
7568 if (supported_tail_call) {
7569 if (!mono_debug_count ())
7570 supported_tail_call = FALSE;
7574 return supported_tail_call;
7577 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7578 * it to the thread local value based on the tls_offset field. Every other kind of access to
7579 * the field causes an assert.
/*
 * is_magic_tls_access:
 *   Return whether FIELD is exactly corlib's ThreadLocal`1.tlsdata field,
 *   i.e. the field whose address computation the JIT specializes.
 */
7582 is_magic_tls_access (MonoClassField *field)
7584 if (strcmp (field->name, "tlsdata"))
7586 if (strcmp (field->parent->name, "ThreadLocal`1"))
7588 return field->parent->image == mono_defaults.corlib;
7591 /* emits the code needed to access a managed tls var (like ThreadStatic)
7592 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7593 * pointer for the current thread.
7594 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *   Encoded offset layout: top byte is (index into thread->static_data)+1,
 *   low 24 bits are the byte offset inside that chunk. Emits the address
 *   computation: thread->static_data [(offset >> 24) - 1] + (offset & 0xffffff).
 */
7597 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7600 int static_data_reg, array_reg, dreg;
7601 int offset2_reg, idx_reg;
7602 // inlined access to the tls data
7603 // idx = (offset >> 24) - 1;
7604 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7605 static_data_reg = alloc_ireg (cfg);
7606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7607 idx_reg = alloc_ireg (cfg);
7608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by pointer size (shift by 3 on 64-bit, 2 on 32-bit) */
7610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7611 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7612 array_reg = alloc_ireg (cfg);
7613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7614 offset2_reg = alloc_ireg (cfg);
7615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7616 dreg = alloc_ireg (cfg);
7617 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7622 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7623 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *   Compute (and cache in *CACHED_TLS_ADDR, a temp local) the address of
 *   the ThreadLocal<T> value for the current thread: load tls_offset from
 *   the ThreadLocal object, obtain the current MonoInternalThread (via the
 *   arch intrinsic, falling back to an icall), then delegate the address
 *   math to emit_managed_static_data_access.
 */
7626 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7628 MonoInst *load, *addr, *temp, *store, *thread_ins;
7629 MonoClassField *offset_field;
7631 if (*cached_tls_addr) {
/* Reuse the address computed earlier in this method */
7632 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7635 thread_ins = mono_get_thread_intrinsic (cfg);
7636 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7638 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7640 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No intrinsic on this arch: call the runtime to get the current thread */
7642 MonoMethod *thread_method;
7643 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7644 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7646 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7647 addr->klass = mono_class_from_mono_type (tls_field->type);
7648 addr->type = STACK_MP;
7649 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7650 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7652 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7659 * Handle calls made to ctors from NEWOBJ opcodes.
7661 * REF_BBLOCK will point to the current bblock after the call.
7664 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7665 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
/*
 * Emit the call to constructor CMETHOD for a NEWOBJ opcode.
 * SP points at the already-pushed call arguments; *REF_BBLOCK is updated
 * to the bblock that is current after a (possibly inlined) call, and
 * *INLINE_COSTS is adjusted when the ctor body is inlined.
 * NOTE(review): some lines are elided in this chunk; comments cover only
 * the visible code.
 */
7667 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7668 MonoBasicBlock *bblock = *ref_bblock;
/* For generic-shared valuetype ctors, an rgctx/vtable argument must be
 * passed explicitly alongside the normal arguments. */
7670 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7671 mono_method_is_generic_sharable (cmethod, TRUE)) {
/* Method-inflated case: the method carries its own method RGCTX. */
7672 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7673 mono_class_vtable (cfg->domain, cmethod->klass);
7674 CHECK_TYPELOAD (cmethod->klass);
7676 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7677 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Otherwise fetch the class vtable through the rgctx ... */
7680 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7681 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* ... or embed the vtable directly when no generic context is in use
 * (branch boundaries are elided in this view). */
7683 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7685 CHECK_TYPELOAD (cmethod->klass);
7686 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7691 /* Avoid virtual calls to ctors if possible */
7692 if (mono_class_is_marshalbyref (cmethod->klass))
7693 callvirt_this_arg = sp [0];
/* Strategy 1: use a backend intrinsic implementation of this ctor. */
7695 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7696 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7697 CHECK_CFG_EXCEPTION;
/* Strategy 2: inline the ctor body (never for Exception subclasses). */
7698 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7699 mono_method_check_inlining (cfg, cmethod) &&
7700 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7703 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* 5 is the size of the CALL opcode in the IL stream. */
7704 cfg->real_offset += 5;
7706 *inline_costs += costs - 5;
7707 *ref_bblock = bblock;
/* Inlining did not happen: record the failure and emit a plain call. */
7709 INLINE_FAILURE ("inline failure");
7710 // FIXME-VT: Clean this up
7711 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7712 GSHAREDVT_FAILURE(*ip);
7713 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* Strategy 3: gsharedvt signature — call through the gsharedvt-out
 * trampoline whose address comes from the rgctx. */
7715 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7718 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7719 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* Strategy 4: unsharable target reached from shared code — indirect
 * call through an address obtained from the rgctx. */
7720 } else if (context_used &&
7721 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7722 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7723 MonoInst *cmethod_addr;
7725 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7727 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7728 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7730 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Strategy 5 (default): a direct, non-inlined call to the ctor. */
7732 INLINE_FAILURE ("ctor call");
7733 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7734 callvirt_this_arg, NULL, vtable_arg);
7741 * mono_method_to_ir:
7743 * Translate the .net IL into linear IR.
7746 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7747 MonoInst *return_var, MonoInst **inline_args,
7748 guint inline_offset, gboolean is_virtual_call)
7751 MonoInst *ins, **sp, **stack_start;
7752 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7753 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7754 MonoMethod *cmethod, *method_definition;
7755 MonoInst **arg_array;
7756 MonoMethodHeader *header;
7758 guint32 token, ins_flag;
7760 MonoClass *constrained_class = NULL;
7761 unsigned char *ip, *end, *target, *err_pos;
7762 MonoMethodSignature *sig;
7763 MonoGenericContext *generic_context = NULL;
7764 MonoGenericContainer *generic_container = NULL;
7765 MonoType **param_types;
7766 int i, n, start_new_bblock, dreg;
7767 int num_calls = 0, inline_costs = 0;
7768 int breakpoint_id = 0;
7770 MonoBoolean security, pinvoke;
7771 MonoSecurityManager* secman = NULL;
7772 MonoDeclSecurityActions actions;
7773 GSList *class_inits = NULL;
7774 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7776 gboolean init_locals, seq_points, skip_dead_blocks;
7777 gboolean sym_seq_points = FALSE;
7778 MonoInst *cached_tls_addr = NULL;
7779 MonoDebugMethodInfo *minfo;
7780 MonoBitSet *seq_point_locs = NULL;
7781 MonoBitSet *seq_point_set_locs = NULL;
7783 cfg->disable_inline = is_jit_optimizer_disabled (method);
7785 /* serialization and xdomain stuff may need access to private fields and methods */
7786 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7787 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7788 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7789 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7790 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7791 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7793 dont_verify |= mono_security_smcs_hack_enabled ();
7795 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7796 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7797 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7798 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7799 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7801 image = method->klass->image;
7802 header = mono_method_get_header (method);
7804 MonoLoaderError *error;
7806 if ((error = mono_loader_get_last_error ())) {
7807 mono_cfg_set_exception (cfg, error->exception_type);
7809 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7810 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7812 goto exception_exit;
7814 generic_container = mono_method_get_generic_container (method);
7815 sig = mono_method_signature (method);
7816 num_args = sig->hasthis + sig->param_count;
7817 ip = (unsigned char*)header->code;
7818 cfg->cil_start = ip;
7819 end = ip + header->code_size;
7820 cfg->stat_cil_code_size += header->code_size;
7822 seq_points = cfg->gen_seq_points && cfg->method == method;
7824 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7825 /* We could hit a seq point before attaching to the JIT (#8338) */
7829 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7830 minfo = mono_debug_lookup_method (method);
7832 int i, n_il_offsets;
7836 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7837 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7838 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7839 sym_seq_points = TRUE;
7840 for (i = 0; i < n_il_offsets; ++i) {
7841 if (il_offsets [i] < header->code_size)
7842 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7844 g_free (il_offsets);
7845 g_free (line_numbers);
7846 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7847 /* Methods without line number info like auto-generated property accessors */
7848 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7849 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7850 sym_seq_points = TRUE;
7855 * Methods without init_locals set could cause asserts in various passes
7856 * (#497220). To work around this, we emit dummy initialization opcodes
7857 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7858 * on some platforms.
7860 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7861 init_locals = header->init_locals;
7865 method_definition = method;
7866 while (method_definition->is_inflated) {
7867 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7868 method_definition = imethod->declaring;
7871 /* SkipVerification is not allowed if core-clr is enabled */
7872 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7874 dont_verify_stloc = TRUE;
7877 if (sig->is_inflated)
7878 generic_context = mono_method_get_context (method);
7879 else if (generic_container)
7880 generic_context = &generic_container->context;
7881 cfg->generic_context = generic_context;
7883 if (!cfg->generic_sharing_context)
7884 g_assert (!sig->has_type_parameters);
7886 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7887 g_assert (method->is_inflated);
7888 g_assert (mono_method_get_context (method)->method_inst);
7890 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7891 g_assert (sig->generic_param_count);
7893 if (cfg->method == method) {
7894 cfg->real_offset = 0;
7896 cfg->real_offset = inline_offset;
7899 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7900 cfg->cil_offset_to_bb_len = header->code_size;
7902 cfg->current_method = method;
7904 if (cfg->verbose_level > 2)
7905 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7907 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7909 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7910 for (n = 0; n < sig->param_count; ++n)
7911 param_types [n + sig->hasthis] = sig->params [n];
7912 cfg->arg_types = param_types;
7914 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7915 if (cfg->method == method) {
7917 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7918 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7921 NEW_BBLOCK (cfg, start_bblock);
7922 cfg->bb_entry = start_bblock;
7923 start_bblock->cil_code = NULL;
7924 start_bblock->cil_length = 0;
7925 #if defined(__native_client_codegen__)
7926 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7927 ins->dreg = alloc_dreg (cfg, STACK_I4);
7928 MONO_ADD_INS (start_bblock, ins);
7932 NEW_BBLOCK (cfg, end_bblock);
7933 cfg->bb_exit = end_bblock;
7934 end_bblock->cil_code = NULL;
7935 end_bblock->cil_length = 0;
7936 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7937 g_assert (cfg->num_bblocks == 2);
7939 arg_array = cfg->args;
7941 if (header->num_clauses) {
7942 cfg->spvars = g_hash_table_new (NULL, NULL);
7943 cfg->exvars = g_hash_table_new (NULL, NULL);
7945 /* handle exception clauses */
7946 for (i = 0; i < header->num_clauses; ++i) {
7947 MonoBasicBlock *try_bb;
7948 MonoExceptionClause *clause = &header->clauses [i];
7949 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7950 try_bb->real_offset = clause->try_offset;
7951 try_bb->try_start = TRUE;
7952 try_bb->region = ((i + 1) << 8) | clause->flags;
7953 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7954 tblock->real_offset = clause->handler_offset;
7955 tblock->flags |= BB_EXCEPTION_HANDLER;
7958 * Linking the try block with the EH block hinders inlining as we won't be able to
7959 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7961 if (COMPILE_LLVM (cfg))
7962 link_bblock (cfg, try_bb, tblock);
7964 if (*(ip + clause->handler_offset) == CEE_POP)
7965 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7967 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7968 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7969 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7970 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7971 MONO_ADD_INS (tblock, ins);
7973 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7974 /* finally clauses already have a seq point */
7975 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7976 MONO_ADD_INS (tblock, ins);
7979 /* todo: is a fault block unsafe to optimize? */
7980 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7981 tblock->flags |= BB_EXCEPTION_UNSAFE;
7985 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7987 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7989 /* catch and filter blocks get the exception object on the stack */
7990 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7991 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7992 MonoInst *dummy_use;
7994 /* mostly like handle_stack_args (), but just sets the input args */
7995 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7996 tblock->in_scount = 1;
7997 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7998 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8001 * Add a dummy use for the exvar so its liveness info will be
8005 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8007 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8008 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8009 tblock->flags |= BB_EXCEPTION_HANDLER;
8010 tblock->real_offset = clause->data.filter_offset;
8011 tblock->in_scount = 1;
8012 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8013 /* The filter block shares the exvar with the handler block */
8014 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8015 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8016 MONO_ADD_INS (tblock, ins);
8020 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8021 clause->data.catch_class &&
8022 cfg->generic_sharing_context &&
8023 mono_class_check_context_used (clause->data.catch_class)) {
8025 * In shared generic code with catch
8026 * clauses containing type variables
8027 * the exception handling code has to
8028 * be able to get to the rgctx.
8029 * Therefore we have to make sure that
8030 * the vtable/mrgctx argument (for
8031 * static or generic methods) or the
8032 * "this" argument (for non-static
8033 * methods) are live.
8035 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8036 mini_method_get_context (method)->method_inst ||
8037 method->klass->valuetype) {
8038 mono_get_vtable_var (cfg);
8040 MonoInst *dummy_use;
8042 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8047 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8048 cfg->cbb = start_bblock;
8049 cfg->args = arg_array;
8050 mono_save_args (cfg, sig, inline_args);
8053 /* FIRST CODE BLOCK */
8054 NEW_BBLOCK (cfg, bblock);
8055 bblock->cil_code = ip;
8059 ADD_BBLOCK (cfg, bblock);
8061 if (cfg->method == method) {
8062 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8063 if (breakpoint_id) {
8064 MONO_INST_NEW (cfg, ins, OP_BREAK);
8065 MONO_ADD_INS (bblock, ins);
8069 if (mono_security_cas_enabled ())
8070 secman = mono_security_manager_get_methods ();
8072 security = (secman && mono_security_method_has_declsec (method));
8073 /* at this point having security doesn't mean we have any code to generate */
8074 if (security && (cfg->method == method)) {
8075 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8076 * And we do not want to enter the next section (with allocation) if we
8077 * have nothing to generate */
8078 security = mono_declsec_get_demands (method, &actions);
8081 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8082 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8084 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8085 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8086 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8088 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
8089 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8093 mono_custom_attrs_free (custom);
8096 custom = mono_custom_attrs_from_class (wrapped->klass);
8097 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8101 mono_custom_attrs_free (custom);
8104 /* not a P/Invoke after all */
8109 /* we use a separate basic block for the initialization code */
8110 NEW_BBLOCK (cfg, init_localsbb);
8111 cfg->bb_init = init_localsbb;
8112 init_localsbb->real_offset = cfg->real_offset;
8113 start_bblock->next_bb = init_localsbb;
8114 init_localsbb->next_bb = bblock;
8115 link_bblock (cfg, start_bblock, init_localsbb);
8116 link_bblock (cfg, init_localsbb, bblock);
8118 cfg->cbb = init_localsbb;
8120 if (cfg->gsharedvt && cfg->method == method) {
8121 MonoGSharedVtMethodInfo *info;
8122 MonoInst *var, *locals_var;
8125 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8126 info->method = cfg->method;
8127 info->count_entries = 16;
8128 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8129 cfg->gsharedvt_info = info;
8131 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8132 /* prevent it from being register allocated */
8133 //var->flags |= MONO_INST_VOLATILE;
8134 cfg->gsharedvt_info_var = var;
8136 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8137 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8139 /* Allocate locals */
8140 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8141 /* prevent it from being register allocated */
8142 //locals_var->flags |= MONO_INST_VOLATILE;
8143 cfg->gsharedvt_locals_var = locals_var;
8145 dreg = alloc_ireg (cfg);
8146 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8148 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8149 ins->dreg = locals_var->dreg;
8151 MONO_ADD_INS (cfg->cbb, ins);
8152 cfg->gsharedvt_locals_var_ins = ins;
8154 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8157 ins->flags |= MONO_INST_INIT;
8161 /* at this point we know, if security is TRUE, that some code needs to be generated */
8162 if (security && (cfg->method == method)) {
8165 cfg->stat_cas_demand_generation++;
8167 if (actions.demand.blob) {
8168 /* Add code for SecurityAction.Demand */
8169 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8170 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8171 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8172 mono_emit_method_call (cfg, secman->demand, args, NULL);
8174 if (actions.noncasdemand.blob) {
8175 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8176 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8177 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8178 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8179 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8180 mono_emit_method_call (cfg, secman->demand, args, NULL);
8182 if (actions.demandchoice.blob) {
8183 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8184 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8185 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8186 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8187 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8191 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8193 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8196 if (mono_security_core_clr_enabled ()) {
8197 /* check if this is native code, e.g. an icall or a p/invoke */
8198 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8199 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8201 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8202 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8204 /* if this ia a native call then it can only be JITted from platform code */
8205 if ((icall || pinvk) && method->klass && method->klass->image) {
8206 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8207 MonoException *ex = icall ? mono_get_exception_security () :
8208 mono_get_exception_method_access ();
8209 emit_throw_exception (cfg, ex);
8216 CHECK_CFG_EXCEPTION;
8218 if (header->code_size == 0)
8221 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8226 if (cfg->method == method)
8227 mono_debug_init_method (cfg, bblock, breakpoint_id);
8229 for (n = 0; n < header->num_locals; ++n) {
8230 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8235 /* We force the vtable variable here for all shared methods
8236 for the possibility that they might show up in a stack
8237 trace where their exact instantiation is needed. */
8238 if (cfg->generic_sharing_context && method == cfg->method) {
8239 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8240 mini_method_get_context (method)->method_inst ||
8241 method->klass->valuetype) {
8242 mono_get_vtable_var (cfg);
8244 /* FIXME: Is there a better way to do this?
8245 We need the variable live for the duration
8246 of the whole method. */
8247 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8251 /* add a check for this != NULL to inlined methods */
8252 if (is_virtual_call) {
8255 NEW_ARGLOAD (cfg, arg_ins, 0);
8256 MONO_ADD_INS (cfg->cbb, arg_ins);
8257 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8260 skip_dead_blocks = !dont_verify;
8261 if (skip_dead_blocks) {
8262 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8267 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8268 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8271 start_new_bblock = 0;
8274 if (cfg->method == method)
8275 cfg->real_offset = ip - header->code;
8277 cfg->real_offset = inline_offset;
8282 if (start_new_bblock) {
8283 bblock->cil_length = ip - bblock->cil_code;
8284 if (start_new_bblock == 2) {
8285 g_assert (ip == tblock->cil_code);
8287 GET_BBLOCK (cfg, tblock, ip);
8289 bblock->next_bb = tblock;
8292 start_new_bblock = 0;
8293 for (i = 0; i < bblock->in_scount; ++i) {
8294 if (cfg->verbose_level > 3)
8295 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8296 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8300 g_slist_free (class_inits);
8303 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8304 link_bblock (cfg, bblock, tblock);
8305 if (sp != stack_start) {
8306 handle_stack_args (cfg, stack_start, sp - stack_start);
8308 CHECK_UNVERIFIABLE (cfg);
8310 bblock->next_bb = tblock;
8313 for (i = 0; i < bblock->in_scount; ++i) {
8314 if (cfg->verbose_level > 3)
8315 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8316 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8319 g_slist_free (class_inits);
8324 if (skip_dead_blocks) {
8325 int ip_offset = ip - header->code;
8327 if (ip_offset == bb->end)
8331 int op_size = mono_opcode_size (ip, end);
8332 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8334 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8336 if (ip_offset + op_size == bb->end) {
8337 MONO_INST_NEW (cfg, ins, OP_NOP);
8338 MONO_ADD_INS (bblock, ins);
8339 start_new_bblock = 1;
8347 * Sequence points are points where the debugger can place a breakpoint.
8348 * Currently, we generate these automatically at points where the IL
8351 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8353 * Make methods interruptable at the beginning, and at the targets of
8354 * backward branches.
8355 * Also, do this at the start of every bblock in methods with clauses too,
8356 * to be able to handle instructions with inprecise control flow like
8358 * Backward branches are handled at the end of method-to-ir ().
8360 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8362 /* Avoid sequence points on empty IL like .volatile */
8363 // FIXME: Enable this
8364 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8365 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8366 if (sp != stack_start)
8367 ins->flags |= MONO_INST_NONEMPTY_STACK;
8368 MONO_ADD_INS (cfg->cbb, ins);
8371 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8374 bblock->real_offset = cfg->real_offset;
8376 if ((cfg->method == method) && cfg->coverage_info) {
8377 guint32 cil_offset = ip - header->code;
8378 cfg->coverage_info->data [cil_offset].cil_code = ip;
8380 /* TODO: Use an increment here */
8381 #if defined(TARGET_X86)
8382 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8383 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8385 MONO_ADD_INS (cfg->cbb, ins);
8387 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8388 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8392 if (cfg->verbose_level > 3)
8393 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8397 if (seq_points && !sym_seq_points && sp != stack_start) {
8399 * The C# compiler uses these nops to notify the JIT that it should
8400 * insert seq points.
8402 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8403 MONO_ADD_INS (cfg->cbb, ins);
8405 if (cfg->keep_cil_nops)
8406 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8408 MONO_INST_NEW (cfg, ins, OP_NOP);
8410 MONO_ADD_INS (bblock, ins);
8413 if (should_insert_brekpoint (cfg->method)) {
8414 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8416 MONO_INST_NEW (cfg, ins, OP_NOP);
8419 MONO_ADD_INS (bblock, ins);
8425 CHECK_STACK_OVF (1);
8426 n = (*ip)-CEE_LDARG_0;
8428 EMIT_NEW_ARGLOAD (cfg, ins, n);
8436 CHECK_STACK_OVF (1);
8437 n = (*ip)-CEE_LDLOC_0;
8439 EMIT_NEW_LOCLOAD (cfg, ins, n);
8448 n = (*ip)-CEE_STLOC_0;
8451 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8453 emit_stloc_ir (cfg, sp, header, n);
8460 CHECK_STACK_OVF (1);
8463 EMIT_NEW_ARGLOAD (cfg, ins, n);
8469 CHECK_STACK_OVF (1);
8472 NEW_ARGLOADA (cfg, ins, n);
8473 MONO_ADD_INS (cfg->cbb, ins);
8483 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8485 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8490 CHECK_STACK_OVF (1);
8493 EMIT_NEW_LOCLOAD (cfg, ins, n);
8497 case CEE_LDLOCA_S: {
8498 unsigned char *tmp_ip;
8500 CHECK_STACK_OVF (1);
8501 CHECK_LOCAL (ip [1]);
8503 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8509 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8518 CHECK_LOCAL (ip [1]);
8519 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8521 emit_stloc_ir (cfg, sp, header, ip [1]);
8526 CHECK_STACK_OVF (1);
8527 EMIT_NEW_PCONST (cfg, ins, NULL);
8528 ins->type = STACK_OBJ;
8533 CHECK_STACK_OVF (1);
8534 EMIT_NEW_ICONST (cfg, ins, -1);
8547 CHECK_STACK_OVF (1);
8548 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8554 CHECK_STACK_OVF (1);
8556 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8562 CHECK_STACK_OVF (1);
8563 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8569 CHECK_STACK_OVF (1);
8570 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8571 ins->type = STACK_I8;
8572 ins->dreg = alloc_dreg (cfg, STACK_I8);
8574 ins->inst_l = (gint64)read64 (ip);
8575 MONO_ADD_INS (bblock, ins);
8581 gboolean use_aotconst = FALSE;
8583 #ifdef TARGET_POWERPC
8584 /* FIXME: Clean this up */
8585 if (cfg->compile_aot)
8586 use_aotconst = TRUE;
8589 /* FIXME: we should really allocate this only late in the compilation process */
8590 f = mono_domain_alloc (cfg->domain, sizeof (float));
8592 CHECK_STACK_OVF (1);
8598 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8600 dreg = alloc_freg (cfg);
8601 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8602 ins->type = cfg->r4_stack_type;
8604 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8605 ins->type = cfg->r4_stack_type;
8606 ins->dreg = alloc_dreg (cfg, STACK_R8);
8608 MONO_ADD_INS (bblock, ins);
8618 gboolean use_aotconst = FALSE;
8620 #ifdef TARGET_POWERPC
8621 /* FIXME: Clean this up */
8622 if (cfg->compile_aot)
8623 use_aotconst = TRUE;
8626 /* FIXME: we should really allocate this only late in the compilation process */
8627 d = mono_domain_alloc (cfg->domain, sizeof (double));
8629 CHECK_STACK_OVF (1);
8635 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8637 dreg = alloc_freg (cfg);
8638 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8639 ins->type = STACK_R8;
8641 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8642 ins->type = STACK_R8;
8643 ins->dreg = alloc_dreg (cfg, STACK_R8);
8645 MONO_ADD_INS (bblock, ins);
8654 MonoInst *temp, *store;
8656 CHECK_STACK_OVF (1);
8660 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8661 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8663 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8666 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8679 if (sp [0]->type == STACK_R8)
8680 /* we need to pop the value from the x86 FP stack */
8681 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8687 INLINE_FAILURE ("jmp");
8688 GSHAREDVT_FAILURE (*ip);
8691 if (stack_start != sp)
8693 token = read32 (ip + 1);
8694 /* FIXME: check the signature matches */
8695 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8697 if (!cmethod || mono_loader_get_last_error ())
8700 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8701 GENERIC_SHARING_FAILURE (CEE_JMP);
8703 if (mono_security_cas_enabled ())
8704 CHECK_CFG_EXCEPTION;
8706 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8708 if (ARCH_HAVE_OP_TAIL_CALL) {
8709 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8712 /* Handle tail calls similarly to calls */
8713 n = fsig->param_count + fsig->hasthis;
8717 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8718 call->method = cmethod;
8719 call->tail_call = TRUE;
8720 call->signature = mono_method_signature (cmethod);
8721 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8722 call->inst.inst_p0 = cmethod;
8723 for (i = 0; i < n; ++i)
8724 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8726 mono_arch_emit_call (cfg, call);
8727 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8728 MONO_ADD_INS (bblock, (MonoInst*)call);
8730 for (i = 0; i < num_args; ++i)
8731 /* Prevent arguments from being optimized away */
8732 arg_array [i]->flags |= MONO_INST_VOLATILE;
8734 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8735 ins = (MonoInst*)call;
8736 ins->inst_p0 = cmethod;
8737 MONO_ADD_INS (bblock, ins);
8741 start_new_bblock = 1;
8746 MonoMethodSignature *fsig;
8749 token = read32 (ip + 1);
8753 //GSHAREDVT_FAILURE (*ip);
8758 fsig = mini_get_signature (method, token, generic_context);
8760 if (method->dynamic && fsig->pinvoke) {
8764 * This is a call through a function pointer using a pinvoke
8765 * signature. Have to create a wrapper and call that instead.
8766 * FIXME: This is very slow, need to create a wrapper at JIT time
8767 * instead based on the signature.
8769 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8770 EMIT_NEW_PCONST (cfg, args [1], fsig);
8772 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8775 n = fsig->param_count + fsig->hasthis;
8779 //g_assert (!virtual || fsig->hasthis);
8783 inline_costs += 10 * num_calls++;
8786 * Making generic calls out of gsharedvt methods.
8787 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8788 * patching gshared method addresses into a gsharedvt method.
8790 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8792 * We pass the address to the gsharedvt trampoline in the rgctx reg
8794 MonoInst *callee = addr;
8796 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8798 GSHAREDVT_FAILURE (*ip);
8800 addr = emit_get_rgctx_sig (cfg, context_used,
8801 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8802 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8806 /* Prevent inlining of methods with indirect calls */
8807 INLINE_FAILURE ("indirect call");
8809 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8814 * Instead of emitting an indirect call, emit a direct call
8815 * with the contents of the aotconst as the patch info.
8817 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8818 info_type = addr->inst_c1;
8819 info_data = addr->inst_p0;
8821 info_type = addr->inst_right->inst_c1;
8822 info_data = addr->inst_right->inst_left;
8825 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8826 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8831 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8835 /* End of call, INS should contain the result of the call, if any */
8837 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8839 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8842 CHECK_CFG_EXCEPTION;
8846 constrained_class = NULL;
8850 case CEE_CALLVIRT: {
8851 MonoInst *addr = NULL;
8852 MonoMethodSignature *fsig = NULL;
8854 int virtual = *ip == CEE_CALLVIRT;
8855 gboolean pass_imt_from_rgctx = FALSE;
8856 MonoInst *imt_arg = NULL;
8857 MonoInst *keep_this_alive = NULL;
8858 gboolean pass_vtable = FALSE;
8859 gboolean pass_mrgctx = FALSE;
8860 MonoInst *vtable_arg = NULL;
8861 gboolean check_this = FALSE;
8862 gboolean supported_tail_call = FALSE;
8863 gboolean tail_call = FALSE;
8864 gboolean need_seq_point = FALSE;
8865 guint32 call_opcode = *ip;
8866 gboolean emit_widen = TRUE;
8867 gboolean push_res = TRUE;
8868 gboolean skip_ret = FALSE;
8869 gboolean delegate_invoke = FALSE;
8870 gboolean direct_icall = FALSE;
8871 gboolean constrained_partial_call = FALSE;
8872 MonoMethod *cil_method;
8875 token = read32 (ip + 1);
8879 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8880 cil_method = cmethod;
8882 if (constrained_class) {
8883 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8884 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8885 g_assert (!cmethod->klass->valuetype);
8886 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8887 constrained_partial_call = TRUE;
8891 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8892 if (cfg->verbose_level > 2)
8893 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8894 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8895 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8896 cfg->generic_sharing_context)) {
8897 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8901 if (cfg->verbose_level > 2)
8902 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8904 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8906 * This is needed since get_method_constrained can't find
8907 * the method in klass representing a type var.
8908 * The type var is guaranteed to be a reference type in this
8911 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8912 g_assert (!cmethod->klass->valuetype);
8914 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8920 if (!cmethod || mono_loader_get_last_error ())
8922 if (!dont_verify && !cfg->skip_visibility) {
8923 MonoMethod *target_method = cil_method;
8924 if (method->is_inflated) {
8925 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8927 if (!mono_method_can_access_method (method_definition, target_method) &&
8928 !mono_method_can_access_method (method, cil_method))
8929 METHOD_ACCESS_FAILURE (method, cil_method);
8932 if (mono_security_core_clr_enabled ())
8933 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8935 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8936 /* MS.NET seems to silently convert this to a callvirt */
8941 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8942 * converts to a callvirt.
8944 * tests/bug-515884.il is an example of this behavior
8946 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8947 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8948 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8952 if (!cmethod->klass->inited)
8953 if (!mono_class_init (cmethod->klass))
8954 TYPE_LOAD_ERROR (cmethod->klass);
8956 fsig = mono_method_signature (cmethod);
8959 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8960 mini_class_is_system_array (cmethod->klass)) {
8961 array_rank = cmethod->klass->rank;
8962 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8963 direct_icall = TRUE;
8964 } else if (fsig->pinvoke) {
8965 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8966 check_for_pending_exc, cfg->compile_aot);
8967 fsig = mono_method_signature (wrapper);
8968 } else if (constrained_class) {
8970 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8974 mono_save_token_info (cfg, image, token, cil_method);
8976 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8977 need_seq_point = TRUE;
8979 /* Don't support calls made using type arguments for now */
8981 if (cfg->gsharedvt) {
8982 if (mini_is_gsharedvt_signature (cfg, fsig))
8983 GSHAREDVT_FAILURE (*ip);
8987 if (mono_security_cas_enabled ()) {
8988 if (check_linkdemand (cfg, method, cmethod))
8989 INLINE_FAILURE ("linkdemand");
8990 CHECK_CFG_EXCEPTION;
8993 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8994 g_assert_not_reached ();
8996 n = fsig->param_count + fsig->hasthis;
8998 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
9001 if (!cfg->generic_sharing_context)
9002 g_assert (!mono_method_check_context_used (cmethod));
9006 //g_assert (!virtual || fsig->hasthis);
9010 if (constrained_class) {
9011 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
9012 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9013 /* The 'Own method' case below */
9014 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9015 /* 'The type parameter is instantiated as a reference type' case below. */
9017 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
9018 CHECK_CFG_EXCEPTION;
9025 * We have the `constrained.' prefix opcode.
9027 if (constrained_partial_call) {
9028 gboolean need_box = TRUE;
9031 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9032 * called method is not known at compile time either. The called method could end up being
9033 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9034 * to box the receiver.
9035 * A simple solution would be to box always and make a normal virtual call, but that would
9036 * be bad performance wise.
9038 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
 9040 				 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9047 MonoBasicBlock *is_ref_bb, *end_bb;
9048 MonoInst *nonbox_call;
 9051 				 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9053 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9054 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9056 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9058 NEW_BBLOCK (cfg, is_ref_bb);
9059 NEW_BBLOCK (cfg, end_bb);
9061 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9062 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
9063 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9066 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9068 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9071 MONO_START_BB (cfg, is_ref_bb);
9072 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9073 ins->klass = constrained_class;
9074 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9075 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9077 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9079 MONO_START_BB (cfg, end_bb);
9082 nonbox_call->dreg = ins->dreg;
9084 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9085 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9086 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9089 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9091 * The type parameter is instantiated as a valuetype,
9092 * but that type doesn't override the method we're
9093 * calling, so we need to box `this'.
9095 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9096 ins->klass = constrained_class;
9097 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9098 CHECK_CFG_EXCEPTION;
9099 } else if (!constrained_class->valuetype) {
9100 int dreg = alloc_ireg_ref (cfg);
9103 * The type parameter is instantiated as a reference
9104 * type. We have a managed pointer on the stack, so
9105 * we need to dereference it here.
9107 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9108 ins->type = STACK_OBJ;
9111 if (cmethod->klass->valuetype) {
9114 /* Interface method */
9117 mono_class_setup_vtable (constrained_class);
9118 CHECK_TYPELOAD (constrained_class);
9119 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9121 TYPE_LOAD_ERROR (constrained_class);
9122 slot = mono_method_get_vtable_slot (cmethod);
9124 TYPE_LOAD_ERROR (cmethod->klass);
9125 cmethod = constrained_class->vtable [ioffset + slot];
9127 if (cmethod->klass == mono_defaults.enum_class) {
9128 /* Enum implements some interfaces, so treat this as the first case */
9129 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9130 ins->klass = constrained_class;
9131 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9132 CHECK_CFG_EXCEPTION;
9137 constrained_class = NULL;
9140 if (check_call_signature (cfg, fsig, sp))
9143 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
9144 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9145 delegate_invoke = TRUE;
9148 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9150 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9151 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9159 * If the callee is a shared method, then its static cctor
9160 * might not get called after the call was patched.
9162 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9163 emit_generic_class_init (cfg, cmethod->klass);
9164 CHECK_TYPELOAD (cmethod->klass);
9167 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9169 if (cfg->generic_sharing_context) {
9170 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9172 context_used = mini_method_check_context_used (cfg, cmethod);
9174 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9175 /* Generic method interface
9176 calls are resolved via a
9177 helper function and don't
9179 if (!cmethod_context || !cmethod_context->method_inst)
9180 pass_imt_from_rgctx = TRUE;
9184 * If a shared method calls another
9185 * shared method then the caller must
9186 * have a generic sharing context
9187 * because the magic trampoline
9188 * requires it. FIXME: We shouldn't
9189 * have to force the vtable/mrgctx
9190 * variable here. Instead there
9191 * should be a flag in the cfg to
9192 * request a generic sharing context.
9195 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9196 mono_get_vtable_var (cfg);
9201 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9203 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9205 CHECK_TYPELOAD (cmethod->klass);
9206 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9211 g_assert (!vtable_arg);
9213 if (!cfg->compile_aot) {
9215 * emit_get_rgctx_method () calls mono_class_vtable () so check
9216 * for type load errors before.
9218 mono_class_setup_vtable (cmethod->klass);
9219 CHECK_TYPELOAD (cmethod->klass);
9222 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9224 /* !marshalbyref is needed to properly handle generic methods + remoting */
9225 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9226 MONO_METHOD_IS_FINAL (cmethod)) &&
9227 !mono_class_is_marshalbyref (cmethod->klass)) {
9234 if (pass_imt_from_rgctx) {
9235 g_assert (!pass_vtable);
9237 imt_arg = emit_get_rgctx_method (cfg, context_used,
9238 cmethod, MONO_RGCTX_INFO_METHOD);
9242 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9244 /* Calling virtual generic methods */
9245 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9246 !(MONO_METHOD_IS_FINAL (cmethod) &&
9247 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9248 fsig->generic_param_count &&
9249 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9250 MonoInst *this_temp, *this_arg_temp, *store;
9251 MonoInst *iargs [4];
9252 gboolean use_imt = FALSE;
9254 g_assert (fsig->is_inflated);
9256 /* Prevent inlining of methods that contain indirect calls */
9257 INLINE_FAILURE ("virtual generic call");
9259 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9260 GSHAREDVT_FAILURE (*ip);
9262 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9263 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9268 g_assert (!imt_arg);
9270 g_assert (cmethod->is_inflated);
9271 imt_arg = emit_get_rgctx_method (cfg, context_used,
9272 cmethod, MONO_RGCTX_INFO_METHOD);
9273 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9275 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9276 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9277 MONO_ADD_INS (bblock, store);
9279 /* FIXME: This should be a managed pointer */
9280 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9282 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9283 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9284 cmethod, MONO_RGCTX_INFO_METHOD);
9285 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9286 addr = mono_emit_jit_icall (cfg,
9287 mono_helper_compile_generic_method, iargs);
9289 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9291 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9298 * Implement a workaround for the inherent races involved in locking:
9304 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9305 * try block, the Exit () won't be executed, see:
9306 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9307 * To work around this, we extend such try blocks to include the last x bytes
9308 * of the Monitor.Enter () call.
9310 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9311 MonoBasicBlock *tbb;
9313 GET_BBLOCK (cfg, tbb, ip + 5);
9315 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9316 * from Monitor.Enter like ArgumentNullException.
9318 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9319 /* Mark this bblock as needing to be extended */
9320 tbb->extend_try_block = TRUE;
9324 /* Conversion to a JIT intrinsic */
9325 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9327 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9328 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9335 if ((cfg->opt & MONO_OPT_INLINE) &&
9336 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9337 mono_method_check_inlining (cfg, cmethod)) {
9339 gboolean always = FALSE;
9341 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9342 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9343 /* Prevent inlining of methods that call wrappers */
9344 INLINE_FAILURE ("wrapper call");
9345 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9349 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9351 cfg->real_offset += 5;
9353 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9354 /* *sp is already set by inline_method */
9359 inline_costs += costs;
9365 /* Tail recursion elimination */
9366 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9367 gboolean has_vtargs = FALSE;
9370 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9371 INLINE_FAILURE ("tail call");
9373 /* keep it simple */
9374 for (i = fsig->param_count - 1; i >= 0; i--) {
9375 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9380 for (i = 0; i < n; ++i)
9381 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9382 MONO_INST_NEW (cfg, ins, OP_BR);
9383 MONO_ADD_INS (bblock, ins);
9384 tblock = start_bblock->out_bb [0];
9385 link_bblock (cfg, bblock, tblock);
9386 ins->inst_target_bb = tblock;
9387 start_new_bblock = 1;
9389 /* skip the CEE_RET, too */
9390 if (ip_in_bb (cfg, bblock, ip + 5))
9397 inline_costs += 10 * num_calls++;
9400 * Making generic calls out of gsharedvt methods.
9401 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9402 * patching gshared method addresses into a gsharedvt method.
9404 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9405 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9406 MonoRgctxInfoType info_type;
9409 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9410 //GSHAREDVT_FAILURE (*ip);
9411 // disable for possible remoting calls
9412 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9413 GSHAREDVT_FAILURE (*ip);
9414 if (fsig->generic_param_count) {
9415 /* virtual generic call */
9416 g_assert (mono_use_imt);
9417 g_assert (!imt_arg);
9418 /* Same as the virtual generic case above */
9419 imt_arg = emit_get_rgctx_method (cfg, context_used,
9420 cmethod, MONO_RGCTX_INFO_METHOD);
9421 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9423 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9424 /* This can happen when we call a fully instantiated iface method */
9425 imt_arg = emit_get_rgctx_method (cfg, context_used,
9426 cmethod, MONO_RGCTX_INFO_METHOD);
9431 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9432 keep_this_alive = sp [0];
9434 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9435 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9437 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9438 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9440 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9444 /* Generic sharing */
9447 * Use this if the callee is gsharedvt sharable too, since
9448 * at runtime we might find an instantiation so the call cannot
9449 * be patched (the 'no_patch' code path in mini-trampolines.c).
9451 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9452 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9453 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9454 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9455 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9456 INLINE_FAILURE ("gshared");
9458 g_assert (cfg->generic_sharing_context && cmethod);
9462 * We are compiling a call to a
9463 * generic method from shared code,
9464 * which means that we have to look up
9465 * the method in the rgctx and do an
9469 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9471 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9472 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9476 /* Direct calls to icalls */
9478 MonoMethod *wrapper;
9481 /* Inline the wrapper */
9482 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9484 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9485 g_assert (costs > 0);
9486 cfg->real_offset += 5;
9488 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9489 /* *sp is already set by inline_method */
9494 inline_costs += costs;
9503 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9504 MonoInst *val = sp [fsig->param_count];
9506 if (val->type == STACK_OBJ) {
9507 MonoInst *iargs [2];
9512 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9515 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9516 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9517 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9518 emit_write_barrier (cfg, addr, val);
9519 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9520 GSHAREDVT_FAILURE (*ip);
9521 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9522 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9524 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9525 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9526 if (!cmethod->klass->element_class->valuetype && !readonly)
9527 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9528 CHECK_TYPELOAD (cmethod->klass);
9531 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9534 g_assert_not_reached ();
9541 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9545 /* Tail prefix / tail call optimization */
9547 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9548 /* FIXME: runtime generic context pointer for jumps? */
9549 /* FIXME: handle this for generic sharing eventually */
9550 if ((ins_flag & MONO_INST_TAILCALL) &&
9551 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9552 supported_tail_call = TRUE;
9554 if (supported_tail_call) {
9557 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9558 INLINE_FAILURE ("tail call");
9560 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9562 if (ARCH_HAVE_OP_TAIL_CALL) {
9563 /* Handle tail calls similarly to normal calls */
9566 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9568 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9569 call->tail_call = TRUE;
9570 call->method = cmethod;
9571 call->signature = mono_method_signature (cmethod);
9574 * We implement tail calls by storing the actual arguments into the
9575 * argument variables, then emitting a CEE_JMP.
9577 for (i = 0; i < n; ++i) {
9578 /* Prevent argument from being register allocated */
9579 arg_array [i]->flags |= MONO_INST_VOLATILE;
9580 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9582 ins = (MonoInst*)call;
9583 ins->inst_p0 = cmethod;
9584 ins->inst_p1 = arg_array [0];
9585 MONO_ADD_INS (bblock, ins);
9586 link_bblock (cfg, bblock, end_bblock);
9587 start_new_bblock = 1;
9589 // FIXME: Eliminate unreachable epilogs
9592 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9593 * only reachable from this call.
9595 GET_BBLOCK (cfg, tblock, ip + 5);
9596 if (tblock == bblock || tblock->in_count == 0)
9605 * Synchronized wrappers.
9606 * Its hard to determine where to replace a method with its synchronized
9607 * wrapper without causing an infinite recursion. The current solution is
9608 * to add the synchronized wrapper in the trampolines, and to
9609 * change the called method to a dummy wrapper, and resolve that wrapper
9610 * to the real method in mono_jit_compile_method ().
9612 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9613 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9614 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9615 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9619 INLINE_FAILURE ("call");
9620 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9621 imt_arg, vtable_arg);
9624 link_bblock (cfg, bblock, end_bblock);
9625 start_new_bblock = 1;
9627 // FIXME: Eliminate unreachable epilogs
9630 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9631 * only reachable from this call.
9633 GET_BBLOCK (cfg, tblock, ip + 5);
9634 if (tblock == bblock || tblock->in_count == 0)
9641 /* End of call, INS should contain the result of the call, if any */
9643 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9646 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9651 if (keep_this_alive) {
9652 MonoInst *dummy_use;
9654 /* See mono_emit_method_call_full () */
9655 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9658 CHECK_CFG_EXCEPTION;
9662 g_assert (*ip == CEE_RET);
9666 constrained_class = NULL;
9668 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9672 if (cfg->method != method) {
9673 /* return from inlined method */
9675 * If in_count == 0, that means the ret is unreachable due to
 9676 			 * being preceded by a throw. In that case, inline_method () will
9677 * handle setting the return value
9678 * (test case: test_0_inline_throw ()).
9680 if (return_var && cfg->cbb->in_count) {
9681 MonoType *ret_type = mono_method_signature (method)->ret;
9687 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9690 //g_assert (returnvar != -1);
9691 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9692 cfg->ret_var_set = TRUE;
9695 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9697 if (cfg->lmf_var && cfg->cbb->in_count)
9701 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9703 if (seq_points && !sym_seq_points) {
 9705 				 * Place a seq point here too even though the IL stack is not
9706 * empty, so a step over on
9709 * will work correctly.
9711 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9712 MONO_ADD_INS (cfg->cbb, ins);
9715 g_assert (!return_var);
9719 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9722 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9725 if (!cfg->vret_addr) {
9728 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9730 EMIT_NEW_RETLOADA (cfg, ret_addr);
9732 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9733 ins->klass = mono_class_from_mono_type (ret_type);
9736 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9737 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9738 MonoInst *iargs [1];
9742 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9743 mono_arch_emit_setret (cfg, method, conv);
9745 mono_arch_emit_setret (cfg, method, *sp);
9748 mono_arch_emit_setret (cfg, method, *sp);
9753 if (sp != stack_start)
9755 MONO_INST_NEW (cfg, ins, OP_BR);
9757 ins->inst_target_bb = end_bblock;
9758 MONO_ADD_INS (bblock, ins);
9759 link_bblock (cfg, bblock, end_bblock);
9760 start_new_bblock = 1;
9764 MONO_INST_NEW (cfg, ins, OP_BR);
9766 target = ip + 1 + (signed char)(*ip);
9768 GET_BBLOCK (cfg, tblock, target);
9769 link_bblock (cfg, bblock, tblock);
9770 ins->inst_target_bb = tblock;
9771 if (sp != stack_start) {
9772 handle_stack_args (cfg, stack_start, sp - stack_start);
9774 CHECK_UNVERIFIABLE (cfg);
9776 MONO_ADD_INS (bblock, ins);
9777 start_new_bblock = 1;
9778 inline_costs += BRANCH_COST;
9792 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9794 target = ip + 1 + *(signed char*)ip;
9800 inline_costs += BRANCH_COST;
9804 MONO_INST_NEW (cfg, ins, OP_BR);
9807 target = ip + 4 + (gint32)read32(ip);
9809 GET_BBLOCK (cfg, tblock, target);
9810 link_bblock (cfg, bblock, tblock);
9811 ins->inst_target_bb = tblock;
9812 if (sp != stack_start) {
9813 handle_stack_args (cfg, stack_start, sp - stack_start);
9815 CHECK_UNVERIFIABLE (cfg);
9818 MONO_ADD_INS (bblock, ins);
9820 start_new_bblock = 1;
9821 inline_costs += BRANCH_COST;
9828 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9829 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9830 guint32 opsize = is_short ? 1 : 4;
9832 CHECK_OPSIZE (opsize);
9834 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9837 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9842 GET_BBLOCK (cfg, tblock, target);
9843 link_bblock (cfg, bblock, tblock);
9844 GET_BBLOCK (cfg, tblock, ip);
9845 link_bblock (cfg, bblock, tblock);
9847 if (sp != stack_start) {
9848 handle_stack_args (cfg, stack_start, sp - stack_start);
9849 CHECK_UNVERIFIABLE (cfg);
9852 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9853 cmp->sreg1 = sp [0]->dreg;
9854 type_from_op (cfg, cmp, sp [0], NULL);
9857 #if SIZEOF_REGISTER == 4
9858 if (cmp->opcode == OP_LCOMPARE_IMM) {
9859 /* Convert it to OP_LCOMPARE */
9860 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9861 ins->type = STACK_I8;
9862 ins->dreg = alloc_dreg (cfg, STACK_I8);
9864 MONO_ADD_INS (bblock, ins);
9865 cmp->opcode = OP_LCOMPARE;
9866 cmp->sreg2 = ins->dreg;
9869 MONO_ADD_INS (bblock, cmp);
9871 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9872 type_from_op (cfg, ins, sp [0], NULL);
9873 MONO_ADD_INS (bblock, ins);
9874 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9875 GET_BBLOCK (cfg, tblock, target);
9876 ins->inst_true_bb = tblock;
9877 GET_BBLOCK (cfg, tblock, ip);
9878 ins->inst_false_bb = tblock;
9879 start_new_bblock = 2;
9882 inline_costs += BRANCH_COST;
9897 MONO_INST_NEW (cfg, ins, *ip);
9899 target = ip + 4 + (gint32)read32(ip);
9905 inline_costs += BRANCH_COST;
9909 MonoBasicBlock **targets;
9910 MonoBasicBlock *default_bblock;
9911 MonoJumpInfoBBTable *table;
9912 int offset_reg = alloc_preg (cfg);
9913 int target_reg = alloc_preg (cfg);
9914 int table_reg = alloc_preg (cfg);
9915 int sum_reg = alloc_preg (cfg);
9916 gboolean use_op_switch;
9920 n = read32 (ip + 1);
9923 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9927 CHECK_OPSIZE (n * sizeof (guint32));
9928 target = ip + n * sizeof (guint32);
9930 GET_BBLOCK (cfg, default_bblock, target);
9931 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9933 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9934 for (i = 0; i < n; ++i) {
9935 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9936 targets [i] = tblock;
9937 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9941 if (sp != stack_start) {
9943 * Link the current bb with the targets as well, so handle_stack_args
9944 * will set their in_stack correctly.
9946 link_bblock (cfg, bblock, default_bblock);
9947 for (i = 0; i < n; ++i)
9948 link_bblock (cfg, bblock, targets [i]);
9950 handle_stack_args (cfg, stack_start, sp - stack_start);
9952 CHECK_UNVERIFIABLE (cfg);
9955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9959 for (i = 0; i < n; ++i)
9960 link_bblock (cfg, bblock, targets [i]);
9962 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9963 table->table = targets;
9964 table->table_size = n;
9966 use_op_switch = FALSE;
9968 /* ARM implements SWITCH statements differently */
9969 /* FIXME: Make it use the generic implementation */
9970 if (!cfg->compile_aot)
9971 use_op_switch = TRUE;
9974 if (COMPILE_LLVM (cfg))
9975 use_op_switch = TRUE;
9977 cfg->cbb->has_jump_table = 1;
9979 if (use_op_switch) {
9980 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9981 ins->sreg1 = src1->dreg;
9982 ins->inst_p0 = table;
9983 ins->inst_many_bb = targets;
9984 ins->klass = GUINT_TO_POINTER (n);
9985 MONO_ADD_INS (cfg->cbb, ins);
9987 if (sizeof (gpointer) == 8)
9988 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9990 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9992 #if SIZEOF_REGISTER == 8
9993 /* The upper word might not be zero, and we add it to a 64 bit address later */
9994 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9997 if (cfg->compile_aot) {
9998 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10000 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10001 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10002 ins->inst_p0 = table;
10003 ins->dreg = table_reg;
10004 MONO_ADD_INS (cfg->cbb, ins);
10007 /* FIXME: Use load_memindex */
10008 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10009 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10010 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10012 start_new_bblock = 1;
10013 inline_costs += (BRANCH_COST * 2);
10026 case CEE_LDIND_REF:
10033 dreg = alloc_freg (cfg);
10036 dreg = alloc_lreg (cfg);
10038 case CEE_LDIND_REF:
10039 dreg = alloc_ireg_ref (cfg);
10042 dreg = alloc_preg (cfg);
10045 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10046 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10047 if (*ip == CEE_LDIND_R4)
10048 ins->type = cfg->r4_stack_type;
10049 ins->flags |= ins_flag;
10050 MONO_ADD_INS (bblock, ins);
10052 if (ins_flag & MONO_INST_VOLATILE) {
10053 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10054 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10059 case CEE_STIND_REF:
10070 if (ins_flag & MONO_INST_VOLATILE) {
10071 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10072 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10075 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10076 ins->flags |= ins_flag;
10079 MONO_ADD_INS (bblock, ins);
10081 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10082 emit_write_barrier (cfg, sp [0], sp [1]);
10091 MONO_INST_NEW (cfg, ins, (*ip));
10093 ins->sreg1 = sp [0]->dreg;
10094 ins->sreg2 = sp [1]->dreg;
10095 type_from_op (cfg, ins, sp [0], sp [1]);
10097 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10099 /* Use the immediate opcodes if possible */
10100 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10101 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10102 if (imm_opcode != -1) {
10103 ins->opcode = imm_opcode;
10104 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10107 NULLIFY_INS (sp [1]);
10111 MONO_ADD_INS ((cfg)->cbb, (ins));
10113 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10130 MONO_INST_NEW (cfg, ins, (*ip));
10132 ins->sreg1 = sp [0]->dreg;
10133 ins->sreg2 = sp [1]->dreg;
10134 type_from_op (cfg, ins, sp [0], sp [1]);
10136 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10137 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10139 /* FIXME: Pass opcode to is_inst_imm */
10141 /* Use the immediate opcodes if possible */
10142 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10145 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10146 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10147 /* Keep emulated opcodes which are optimized away later */
10148 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10149 imm_opcode = mono_op_to_op_imm (ins->opcode);
10152 if (imm_opcode != -1) {
10153 ins->opcode = imm_opcode;
10154 if (sp [1]->opcode == OP_I8CONST) {
10155 #if SIZEOF_REGISTER == 8
10156 ins->inst_imm = sp [1]->inst_l;
10158 ins->inst_ls_word = sp [1]->inst_ls_word;
10159 ins->inst_ms_word = sp [1]->inst_ms_word;
10163 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10166 /* Might be followed by an instruction added by add_widen_op */
10167 if (sp [1]->next == NULL)
10168 NULLIFY_INS (sp [1]);
10171 MONO_ADD_INS ((cfg)->cbb, (ins));
10173 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10186 case CEE_CONV_OVF_I8:
10187 case CEE_CONV_OVF_U8:
10188 case CEE_CONV_R_UN:
10191 /* Special case this earlier so we have long constants in the IR */
10192 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10193 int data = sp [-1]->inst_c0;
10194 sp [-1]->opcode = OP_I8CONST;
10195 sp [-1]->type = STACK_I8;
10196 #if SIZEOF_REGISTER == 8
10197 if ((*ip) == CEE_CONV_U8)
10198 sp [-1]->inst_c0 = (guint32)data;
10200 sp [-1]->inst_c0 = data;
10202 sp [-1]->inst_ls_word = data;
10203 if ((*ip) == CEE_CONV_U8)
10204 sp [-1]->inst_ms_word = 0;
10206 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10208 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10215 case CEE_CONV_OVF_I4:
10216 case CEE_CONV_OVF_I1:
10217 case CEE_CONV_OVF_I2:
10218 case CEE_CONV_OVF_I:
10219 case CEE_CONV_OVF_U:
10222 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10223 ADD_UNOP (CEE_CONV_OVF_I8);
10230 case CEE_CONV_OVF_U1:
10231 case CEE_CONV_OVF_U2:
10232 case CEE_CONV_OVF_U4:
10235 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10236 ADD_UNOP (CEE_CONV_OVF_U8);
10243 case CEE_CONV_OVF_I1_UN:
10244 case CEE_CONV_OVF_I2_UN:
10245 case CEE_CONV_OVF_I4_UN:
10246 case CEE_CONV_OVF_I8_UN:
10247 case CEE_CONV_OVF_U1_UN:
10248 case CEE_CONV_OVF_U2_UN:
10249 case CEE_CONV_OVF_U4_UN:
10250 case CEE_CONV_OVF_U8_UN:
10251 case CEE_CONV_OVF_I_UN:
10252 case CEE_CONV_OVF_U_UN:
10259 CHECK_CFG_EXCEPTION;
10263 case CEE_ADD_OVF_UN:
10265 case CEE_MUL_OVF_UN:
10267 case CEE_SUB_OVF_UN:
10273 GSHAREDVT_FAILURE (*ip);
10276 token = read32 (ip + 1);
10277 klass = mini_get_class (method, token, generic_context);
10278 CHECK_TYPELOAD (klass);
10280 if (generic_class_is_reference_type (cfg, klass)) {
10281 MonoInst *store, *load;
10282 int dreg = alloc_ireg_ref (cfg);
10284 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10285 load->flags |= ins_flag;
10286 MONO_ADD_INS (cfg->cbb, load);
10288 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10289 store->flags |= ins_flag;
10290 MONO_ADD_INS (cfg->cbb, store);
10292 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10293 emit_write_barrier (cfg, sp [0], sp [1]);
10295 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10301 int loc_index = -1;
10307 token = read32 (ip + 1);
10308 klass = mini_get_class (method, token, generic_context);
10309 CHECK_TYPELOAD (klass);
10311 /* Optimize the common ldobj+stloc combination */
10314 loc_index = ip [6];
10321 loc_index = ip [5] - CEE_STLOC_0;
10328 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10329 CHECK_LOCAL (loc_index);
10331 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10332 ins->dreg = cfg->locals [loc_index]->dreg;
10333 ins->flags |= ins_flag;
10336 if (ins_flag & MONO_INST_VOLATILE) {
10337 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10338 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10344 /* Optimize the ldobj+stobj combination */
10345 /* The reference case ends up being a load+store anyway */
10346 /* Skip this if the operation is volatile. */
10347 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10352 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10359 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10360 ins->flags |= ins_flag;
10363 if (ins_flag & MONO_INST_VOLATILE) {
10364 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10365 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10374 CHECK_STACK_OVF (1);
10376 n = read32 (ip + 1);
10378 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10379 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10380 ins->type = STACK_OBJ;
10383 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10384 MonoInst *iargs [1];
10385 char *str = mono_method_get_wrapper_data (method, n);
10387 if (cfg->compile_aot)
10388 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10390 EMIT_NEW_PCONST (cfg, iargs [0], str);
10391 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10393 if (cfg->opt & MONO_OPT_SHARED) {
10394 MonoInst *iargs [3];
10396 if (cfg->compile_aot) {
10397 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10399 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10400 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10401 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10402 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10403 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10405 if (bblock->out_of_line) {
10406 MonoInst *iargs [2];
10408 if (image == mono_defaults.corlib) {
10410 * Avoid relocations in AOT and save some space by using a
10411 * version of helper_ldstr specialized to mscorlib.
10413 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10414 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10416 /* Avoid creating the string object */
10417 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10418 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10419 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10423 if (cfg->compile_aot) {
10424 NEW_LDSTRCONST (cfg, ins, image, n);
10426 MONO_ADD_INS (bblock, ins);
10429 NEW_PCONST (cfg, ins, NULL);
10430 ins->type = STACK_OBJ;
10431 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10433 OUT_OF_MEMORY_FAILURE;
10436 MONO_ADD_INS (bblock, ins);
10445 MonoInst *iargs [2];
10446 MonoMethodSignature *fsig;
10449 MonoInst *vtable_arg = NULL;
10452 token = read32 (ip + 1);
10453 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10454 if (!cmethod || mono_loader_get_last_error ())
10456 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10459 mono_save_token_info (cfg, image, token, cmethod);
10461 if (!mono_class_init (cmethod->klass))
10462 TYPE_LOAD_ERROR (cmethod->klass);
10464 context_used = mini_method_check_context_used (cfg, cmethod);
10466 if (mono_security_cas_enabled ()) {
10467 if (check_linkdemand (cfg, method, cmethod))
10468 INLINE_FAILURE ("linkdemand");
10469 CHECK_CFG_EXCEPTION;
10470 } else if (mono_security_core_clr_enabled ()) {
10471 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10474 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10475 emit_generic_class_init (cfg, cmethod->klass);
10476 CHECK_TYPELOAD (cmethod->klass);
10480 if (cfg->gsharedvt) {
10481 if (mini_is_gsharedvt_variable_signature (sig))
10482 GSHAREDVT_FAILURE (*ip);
10486 n = fsig->param_count;
10490 * Generate smaller code for the common newobj <exception> instruction in
10491 * argument checking code.
10493 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10494 is_exception_class (cmethod->klass) && n <= 2 &&
10495 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10496 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10497 MonoInst *iargs [3];
10501 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10504 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10507 iargs [1] = sp [0];
10508 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10511 iargs [1] = sp [0];
10512 iargs [2] = sp [1];
10513 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10516 g_assert_not_reached ();
10524 /* move the args to allow room for 'this' in the first position */
10530 /* check_call_signature () requires sp[0] to be set */
10531 this_ins.type = STACK_OBJ;
10532 sp [0] = &this_ins;
10533 if (check_call_signature (cfg, fsig, sp))
10538 if (mini_class_is_system_array (cmethod->klass)) {
10539 *sp = emit_get_rgctx_method (cfg, context_used,
10540 cmethod, MONO_RGCTX_INFO_METHOD);
10542 /* Avoid varargs in the common case */
10543 if (fsig->param_count == 1)
10544 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10545 else if (fsig->param_count == 2)
10546 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10547 else if (fsig->param_count == 3)
10548 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10549 else if (fsig->param_count == 4)
10550 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10552 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10553 } else if (cmethod->string_ctor) {
10554 g_assert (!context_used);
10555 g_assert (!vtable_arg);
10556 /* we simply pass a null pointer */
10557 EMIT_NEW_PCONST (cfg, *sp, NULL);
10558 /* now call the string ctor */
10559 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10561 if (cmethod->klass->valuetype) {
10562 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10563 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10564 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10569 * The code generated by mini_emit_virtual_call () expects
10570 * iargs [0] to be a boxed instance, but luckily the vcall
10571 * will be transformed into a normal call there.
10573 } else if (context_used) {
10574 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10577 MonoVTable *vtable = NULL;
10579 if (!cfg->compile_aot)
10580 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10581 CHECK_TYPELOAD (cmethod->klass);
10584 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10585 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10586 * As a workaround, we call class cctors before allocating objects.
10588 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10589 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10590 if (cfg->verbose_level > 2)
10591 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10592 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10595 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10598 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10601 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10603 /* Now call the actual ctor */
10604 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10605 CHECK_CFG_EXCEPTION;
10608 if (alloc == NULL) {
10610 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10611 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10619 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10620 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10623 case CEE_CASTCLASS:
10627 token = read32 (ip + 1);
10628 klass = mini_get_class (method, token, generic_context);
10629 CHECK_TYPELOAD (klass);
10630 if (sp [0]->type != STACK_OBJ)
10633 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10634 CHECK_CFG_EXCEPTION;
10643 token = read32 (ip + 1);
10644 klass = mini_get_class (method, token, generic_context);
10645 CHECK_TYPELOAD (klass);
10646 if (sp [0]->type != STACK_OBJ)
10649 context_used = mini_class_check_context_used (cfg, klass);
10651 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10652 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10653 MonoInst *args [3];
10660 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10663 if (cfg->compile_aot) {
10664 idx = get_castclass_cache_idx (cfg);
10665 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10667 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10670 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10673 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10674 MonoMethod *mono_isinst;
10675 MonoInst *iargs [1];
10678 mono_isinst = mono_marshal_get_isinst (klass);
10679 iargs [0] = sp [0];
10681 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10682 iargs, ip, cfg->real_offset, TRUE, &bblock);
10683 CHECK_CFG_EXCEPTION;
10684 g_assert (costs > 0);
10687 cfg->real_offset += 5;
10691 inline_costs += costs;
10694 ins = handle_isinst (cfg, klass, *sp, context_used);
10695 CHECK_CFG_EXCEPTION;
10702 case CEE_UNBOX_ANY: {
10703 MonoInst *res, *addr;
10708 token = read32 (ip + 1);
10709 klass = mini_get_class (method, token, generic_context);
10710 CHECK_TYPELOAD (klass);
10712 mono_save_token_info (cfg, image, token, klass);
10714 context_used = mini_class_check_context_used (cfg, klass);
10716 if (mini_is_gsharedvt_klass (cfg, klass)) {
10717 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10719 } else if (generic_class_is_reference_type (cfg, klass)) {
10720 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10721 CHECK_CFG_EXCEPTION;
10722 } else if (mono_class_is_nullable (klass)) {
10723 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10725 addr = handle_unbox (cfg, klass, sp, context_used);
10727 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10738 MonoClass *enum_class;
10739 MonoMethod *has_flag;
10745 token = read32 (ip + 1);
10746 klass = mini_get_class (method, token, generic_context);
10747 CHECK_TYPELOAD (klass);
10749 mono_save_token_info (cfg, image, token, klass);
10751 context_used = mini_class_check_context_used (cfg, klass);
10753 if (generic_class_is_reference_type (cfg, klass)) {
10759 if (klass == mono_defaults.void_class)
10761 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10763 /* frequent check in generic code: box (struct), brtrue */
10768 * <push int/long ptr>
10771 * constrained. MyFlags
10772 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10774 * If we find this sequence and the operand types on box and constrained
10775 * are equal, we can emit a specialized instruction sequence instead of
10776 * the very slow HasFlag () call.
10778 if ((cfg->opt & MONO_OPT_INTRINS) &&
10779 /* Cheap checks first. */
10780 ip + 5 + 6 + 5 < end &&
10781 ip [5] == CEE_PREFIX1 &&
10782 ip [6] == CEE_CONSTRAINED_ &&
10783 ip [11] == CEE_CALLVIRT &&
10784 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10785 mono_class_is_enum (klass) &&
10786 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10787 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10788 has_flag->klass == mono_defaults.enum_class &&
10789 !strcmp (has_flag->name, "HasFlag") &&
10790 has_flag->signature->hasthis &&
10791 has_flag->signature->param_count == 1) {
10792 CHECK_TYPELOAD (enum_class);
10794 if (enum_class == klass) {
10795 MonoInst *enum_this, *enum_flag;
10800 enum_this = sp [0];
10801 enum_flag = sp [1];
10803 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10808 // FIXME: LLVM can't handle the inconsistent bb linking
10809 if (!mono_class_is_nullable (klass) &&
10810 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10811 (ip [5] == CEE_BRTRUE ||
10812 ip [5] == CEE_BRTRUE_S ||
10813 ip [5] == CEE_BRFALSE ||
10814 ip [5] == CEE_BRFALSE_S)) {
10815 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10817 MonoBasicBlock *true_bb, *false_bb;
10821 if (cfg->verbose_level > 3) {
10822 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10823 printf ("<box+brtrue opt>\n");
10828 case CEE_BRFALSE_S:
10831 target = ip + 1 + (signed char)(*ip);
10838 target = ip + 4 + (gint)(read32 (ip));
10842 g_assert_not_reached ();
10846 * We need to link both bblocks, since it is needed for handling stack
10847 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10848 * Branching to only one of them would lead to inconsistencies, so
10849 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10851 GET_BBLOCK (cfg, true_bb, target);
10852 GET_BBLOCK (cfg, false_bb, ip);
10854 mono_link_bblock (cfg, cfg->cbb, true_bb);
10855 mono_link_bblock (cfg, cfg->cbb, false_bb);
10857 if (sp != stack_start) {
10858 handle_stack_args (cfg, stack_start, sp - stack_start);
10860 CHECK_UNVERIFIABLE (cfg);
10863 if (COMPILE_LLVM (cfg)) {
10864 dreg = alloc_ireg (cfg);
10865 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10868 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10870 /* The JIT can't eliminate the iconst+compare */
10871 MONO_INST_NEW (cfg, ins, OP_BR);
10872 ins->inst_target_bb = is_true ? true_bb : false_bb;
10873 MONO_ADD_INS (cfg->cbb, ins);
10876 start_new_bblock = 1;
10880 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10882 CHECK_CFG_EXCEPTION;
10891 token = read32 (ip + 1);
10892 klass = mini_get_class (method, token, generic_context);
10893 CHECK_TYPELOAD (klass);
10895 mono_save_token_info (cfg, image, token, klass);
10897 context_used = mini_class_check_context_used (cfg, klass);
10899 if (mono_class_is_nullable (klass)) {
10902 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10903 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10907 ins = handle_unbox (cfg, klass, sp, context_used);
10920 MonoClassField *field;
10921 #ifndef DISABLE_REMOTING
10925 gboolean is_instance;
10927 gpointer addr = NULL;
10928 gboolean is_special_static;
10930 MonoInst *store_val = NULL;
10931 MonoInst *thread_ins;
10934 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10936 if (op == CEE_STFLD) {
10939 store_val = sp [1];
10944 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10946 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10949 if (op == CEE_STSFLD) {
10952 store_val = sp [0];
10957 token = read32 (ip + 1);
10958 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10959 field = mono_method_get_wrapper_data (method, token);
10960 klass = field->parent;
10963 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10966 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10967 FIELD_ACCESS_FAILURE (method, field);
10968 mono_class_init (klass);
10970 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10973 /* if the class is Critical then transparent code cannot access it's fields */
10974 if (!is_instance && mono_security_core_clr_enabled ())
10975 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10977 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10978 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10979 if (mono_security_core_clr_enabled ())
10980 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10984 * LDFLD etc. is usable on static fields as well, so convert those cases to
10987 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10999 g_assert_not_reached ();
11001 is_instance = FALSE;
11004 context_used = mini_class_check_context_used (cfg, klass);
11006 /* INSTANCE CASE */
11008 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11009 if (op == CEE_STFLD) {
11010 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11012 #ifndef DISABLE_REMOTING
11013 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11014 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11015 MonoInst *iargs [5];
11017 GSHAREDVT_FAILURE (op);
11019 iargs [0] = sp [0];
11020 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11021 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11022 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11024 iargs [4] = sp [1];
11026 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11027 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11028 iargs, ip, cfg->real_offset, TRUE, &bblock);
11029 CHECK_CFG_EXCEPTION;
11030 g_assert (costs > 0);
11032 cfg->real_offset += 5;
11034 inline_costs += costs;
11036 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11043 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11045 if (mini_is_gsharedvt_klass (cfg, klass)) {
11046 MonoInst *offset_ins;
11048 context_used = mini_class_check_context_used (cfg, klass);
11050 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11051 dreg = alloc_ireg_mp (cfg);
11052 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11053 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11054 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11056 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11058 if (sp [0]->opcode != OP_LDADDR)
11059 store->flags |= MONO_INST_FAULT;
11061 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11062 /* insert call to write barrier */
11066 dreg = alloc_ireg_mp (cfg);
11067 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11068 emit_write_barrier (cfg, ptr, sp [1]);
11071 store->flags |= ins_flag;
11078 #ifndef DISABLE_REMOTING
11079 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11080 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11081 MonoInst *iargs [4];
11083 GSHAREDVT_FAILURE (op);
11085 iargs [0] = sp [0];
11086 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11087 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11088 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11089 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11090 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11091 iargs, ip, cfg->real_offset, TRUE, &bblock);
11092 CHECK_CFG_EXCEPTION;
11093 g_assert (costs > 0);
11095 cfg->real_offset += 5;
11099 inline_costs += costs;
11101 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11107 if (sp [0]->type == STACK_VTYPE) {
11110 /* Have to compute the address of the variable */
11112 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11114 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11116 g_assert (var->klass == klass);
11118 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11122 if (op == CEE_LDFLDA) {
11123 if (is_magic_tls_access (field)) {
11124 GSHAREDVT_FAILURE (*ip);
11126 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
11128 if (sp [0]->type == STACK_OBJ) {
11129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11130 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11133 dreg = alloc_ireg_mp (cfg);
11135 if (mini_is_gsharedvt_klass (cfg, klass)) {
11136 MonoInst *offset_ins;
11138 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11139 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11141 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11143 ins->klass = mono_class_from_mono_type (field->type);
11144 ins->type = STACK_MP;
11150 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11152 if (mini_is_gsharedvt_klass (cfg, klass)) {
11153 MonoInst *offset_ins;
11155 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11156 dreg = alloc_ireg_mp (cfg);
11157 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11158 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11160 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11162 load->flags |= ins_flag;
11163 if (sp [0]->opcode != OP_LDADDR)
11164 load->flags |= MONO_INST_FAULT;
11178 * We can only support shared generic static
11179 * field access on architectures where the
11180 * trampoline code has been extended to handle
11181 * the generic class init.
11183 #ifndef MONO_ARCH_VTABLE_REG
11184 GENERIC_SHARING_FAILURE (op);
11187 context_used = mini_class_check_context_used (cfg, klass);
11189 ftype = mono_field_get_type (field);
11191 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11194 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11195 * to be called here.
11197 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11198 mono_class_vtable (cfg->domain, klass);
11199 CHECK_TYPELOAD (klass);
11201 mono_domain_lock (cfg->domain);
11202 if (cfg->domain->special_static_fields)
11203 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11204 mono_domain_unlock (cfg->domain);
11206 is_special_static = mono_class_field_is_special_static (field);
11208 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11209 thread_ins = mono_get_thread_intrinsic (cfg);
11213 /* Generate IR to compute the field address */
11214 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11216 * Fast access to TLS data
11217 * Inline version of get_thread_static_data () in
11221 int idx, static_data_reg, array_reg, dreg;
11223 GSHAREDVT_FAILURE (op);
11225 // offset &= 0x7fffffff;
11226 // idx = (offset >> 24) - 1;
11227 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11228 MONO_ADD_INS (cfg->cbb, thread_ins);
11229 static_data_reg = alloc_ireg (cfg);
11230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11232 if (cfg->compile_aot) {
11233 int offset_reg, offset2_reg, idx_reg;
11235 /* For TLS variables, this will return the TLS offset */
11236 EMIT_NEW_SFLDACONST (cfg, ins, field);
11237 offset_reg = ins->dreg;
11238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11239 idx_reg = alloc_ireg (cfg);
11240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11242 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11243 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11244 array_reg = alloc_ireg (cfg);
11245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11246 offset2_reg = alloc_ireg (cfg);
11247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11248 dreg = alloc_ireg (cfg);
11249 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11251 offset = (gsize)addr & 0x7fffffff;
11252 idx = (offset >> 24) - 1;
11254 array_reg = alloc_ireg (cfg);
11255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11256 dreg = alloc_ireg (cfg);
11257 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11259 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11260 (cfg->compile_aot && is_special_static) ||
11261 (context_used && is_special_static)) {
11262 MonoInst *iargs [2];
11264 g_assert (field->parent);
11265 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11266 if (context_used) {
11267 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11268 field, MONO_RGCTX_INFO_CLASS_FIELD);
11270 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11272 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11273 } else if (context_used) {
11274 MonoInst *static_data;
11277 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11278 method->klass->name_space, method->klass->name, method->name,
11279 depth, field->offset);
11282 if (mono_class_needs_cctor_run (klass, method))
11283 emit_generic_class_init (cfg, klass);
11286 * The pointer we're computing here is
11288 * super_info.static_data + field->offset
11290 static_data = emit_get_rgctx_klass (cfg, context_used,
11291 klass, MONO_RGCTX_INFO_STATIC_DATA);
11293 if (mini_is_gsharedvt_klass (cfg, klass)) {
11294 MonoInst *offset_ins;
11296 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11297 dreg = alloc_ireg_mp (cfg);
11298 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11299 } else if (field->offset == 0) {
11302 int addr_reg = mono_alloc_preg (cfg);
11303 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11305 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11306 MonoInst *iargs [2];
11308 g_assert (field->parent);
11309 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11310 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11311 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11313 MonoVTable *vtable = NULL;
11315 if (!cfg->compile_aot)
11316 vtable = mono_class_vtable (cfg->domain, klass);
11317 CHECK_TYPELOAD (klass);
11320 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11321 if (!(g_slist_find (class_inits, klass))) {
11322 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11323 if (cfg->verbose_level > 2)
11324 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11325 class_inits = g_slist_prepend (class_inits, klass);
11328 if (cfg->run_cctors) {
11330 /* This makes so that inline cannot trigger */
11331 /* .cctors: too many apps depend on them */
11332 /* running with a specific order... */
11334 if (! vtable->initialized)
11335 INLINE_FAILURE ("class init");
11336 ex = mono_runtime_class_init_full (vtable, FALSE);
11338 set_exception_object (cfg, ex);
11339 goto exception_exit;
11343 if (cfg->compile_aot)
11344 EMIT_NEW_SFLDACONST (cfg, ins, field);
11347 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11349 EMIT_NEW_PCONST (cfg, ins, addr);
11352 MonoInst *iargs [1];
11353 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11354 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11358 /* Generate IR to do the actual load/store operation */
11360 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11361 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11362 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11365 if (op == CEE_LDSFLDA) {
11366 ins->klass = mono_class_from_mono_type (ftype);
11367 ins->type = STACK_PTR;
11369 } else if (op == CEE_STSFLD) {
11372 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11373 store->flags |= ins_flag;
11375 gboolean is_const = FALSE;
11376 MonoVTable *vtable = NULL;
11377 gpointer addr = NULL;
11379 if (!context_used) {
11380 vtable = mono_class_vtable (cfg->domain, klass);
11381 CHECK_TYPELOAD (klass);
11383 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11384 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11385 int ro_type = ftype->type;
11387 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11388 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11389 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11392 GSHAREDVT_FAILURE (op);
11394 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11397 case MONO_TYPE_BOOLEAN:
11399 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11403 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11406 case MONO_TYPE_CHAR:
11408 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11412 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11417 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11421 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11426 case MONO_TYPE_PTR:
11427 case MONO_TYPE_FNPTR:
11428 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11429 type_to_eval_stack_type ((cfg), field->type, *sp);
11432 case MONO_TYPE_STRING:
11433 case MONO_TYPE_OBJECT:
11434 case MONO_TYPE_CLASS:
11435 case MONO_TYPE_SZARRAY:
11436 case MONO_TYPE_ARRAY:
11437 if (!mono_gc_is_moving ()) {
11438 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11439 type_to_eval_stack_type ((cfg), field->type, *sp);
11447 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11452 case MONO_TYPE_VALUETYPE:
11462 CHECK_STACK_OVF (1);
11464 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11465 load->flags |= ins_flag;
11471 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11472 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11473 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11484 token = read32 (ip + 1);
11485 klass = mini_get_class (method, token, generic_context);
11486 CHECK_TYPELOAD (klass);
11487 if (ins_flag & MONO_INST_VOLATILE) {
11488 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11489 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11491 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11492 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11493 ins->flags |= ins_flag;
11494 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11495 generic_class_is_reference_type (cfg, klass)) {
11496 /* insert call to write barrier */
11497 emit_write_barrier (cfg, sp [0], sp [1]);
11509 const char *data_ptr;
11511 guint32 field_token;
11517 token = read32 (ip + 1);
11519 klass = mini_get_class (method, token, generic_context);
11520 CHECK_TYPELOAD (klass);
11522 context_used = mini_class_check_context_used (cfg, klass);
11524 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11525 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11526 ins->sreg1 = sp [0]->dreg;
11527 ins->type = STACK_I4;
11528 ins->dreg = alloc_ireg (cfg);
11529 MONO_ADD_INS (cfg->cbb, ins);
11530 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11533 if (context_used) {
11534 MonoInst *args [3];
11535 MonoClass *array_class = mono_array_class_get (klass, 1);
11536 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11538 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11541 args [0] = emit_get_rgctx_klass (cfg, context_used,
11542 array_class, MONO_RGCTX_INFO_VTABLE);
11547 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11549 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11551 if (cfg->opt & MONO_OPT_SHARED) {
11552 /* Decompose now to avoid problems with references to the domainvar */
11553 MonoInst *iargs [3];
11555 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11556 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11557 iargs [2] = sp [0];
11559 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11561 /* Decompose later since it is needed by abcrem */
11562 MonoClass *array_type = mono_array_class_get (klass, 1);
11563 mono_class_vtable (cfg->domain, array_type);
11564 CHECK_TYPELOAD (array_type);
11566 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11567 ins->dreg = alloc_ireg_ref (cfg);
11568 ins->sreg1 = sp [0]->dreg;
11569 ins->inst_newa_class = klass;
11570 ins->type = STACK_OBJ;
11571 ins->klass = array_type;
11572 MONO_ADD_INS (cfg->cbb, ins);
11573 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11574 cfg->cbb->has_array_access = TRUE;
11576 /* Needed so mono_emit_load_get_addr () gets called */
11577 mono_get_got_var (cfg);
11587 * we inline/optimize the initialization sequence if possible.
11588 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11589 * for small sizes open code the memcpy
11590 * ensure the rva field is big enough
11592 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11593 MonoMethod *memcpy_method = get_memcpy_method ();
11594 MonoInst *iargs [3];
11595 int add_reg = alloc_ireg_mp (cfg);
11597 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11598 if (cfg->compile_aot) {
11599 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11601 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11603 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11604 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11613 if (sp [0]->type != STACK_OBJ)
11616 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11617 ins->dreg = alloc_preg (cfg);
11618 ins->sreg1 = sp [0]->dreg;
11619 ins->type = STACK_I4;
11620 /* This flag will be inherited by the decomposition */
11621 ins->flags |= MONO_INST_FAULT;
11622 MONO_ADD_INS (cfg->cbb, ins);
11623 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11624 cfg->cbb->has_array_access = TRUE;
11632 if (sp [0]->type != STACK_OBJ)
11635 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11637 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11638 CHECK_TYPELOAD (klass);
11639 /* we need to make sure that this array is exactly the type it needs
11640 * to be for correctness. the wrappers are lax with their usage
11641 * so we need to ignore them here
11643 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11644 MonoClass *array_class = mono_array_class_get (klass, 1);
11645 mini_emit_check_array_type (cfg, sp [0], array_class);
11646 CHECK_TYPELOAD (array_class);
11650 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11655 case CEE_LDELEM_I1:
11656 case CEE_LDELEM_U1:
11657 case CEE_LDELEM_I2:
11658 case CEE_LDELEM_U2:
11659 case CEE_LDELEM_I4:
11660 case CEE_LDELEM_U4:
11661 case CEE_LDELEM_I8:
11663 case CEE_LDELEM_R4:
11664 case CEE_LDELEM_R8:
11665 case CEE_LDELEM_REF: {
11671 if (*ip == CEE_LDELEM) {
11673 token = read32 (ip + 1);
11674 klass = mini_get_class (method, token, generic_context);
11675 CHECK_TYPELOAD (klass);
11676 mono_class_init (klass);
11679 klass = array_access_to_klass (*ip);
11681 if (sp [0]->type != STACK_OBJ)
11684 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11686 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11687 // FIXME-VT: OP_ICONST optimization
11688 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11689 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11690 ins->opcode = OP_LOADV_MEMBASE;
11691 } else if (sp [1]->opcode == OP_ICONST) {
11692 int array_reg = sp [0]->dreg;
11693 int index_reg = sp [1]->dreg;
11694 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11696 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11697 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11699 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11700 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11703 if (*ip == CEE_LDELEM)
11710 case CEE_STELEM_I1:
11711 case CEE_STELEM_I2:
11712 case CEE_STELEM_I4:
11713 case CEE_STELEM_I8:
11714 case CEE_STELEM_R4:
11715 case CEE_STELEM_R8:
11716 case CEE_STELEM_REF:
11721 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11723 if (*ip == CEE_STELEM) {
11725 token = read32 (ip + 1);
11726 klass = mini_get_class (method, token, generic_context);
11727 CHECK_TYPELOAD (klass);
11728 mono_class_init (klass);
11731 klass = array_access_to_klass (*ip);
11733 if (sp [0]->type != STACK_OBJ)
11736 emit_array_store (cfg, klass, sp, TRUE);
11738 if (*ip == CEE_STELEM)
11745 case CEE_CKFINITE: {
11749 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11750 ins->sreg1 = sp [0]->dreg;
11751 ins->dreg = alloc_freg (cfg);
11752 ins->type = STACK_R8;
11753 MONO_ADD_INS (bblock, ins);
11755 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11760 case CEE_REFANYVAL: {
11761 MonoInst *src_var, *src;
11763 int klass_reg = alloc_preg (cfg);
11764 int dreg = alloc_preg (cfg);
11766 GSHAREDVT_FAILURE (*ip);
11769 MONO_INST_NEW (cfg, ins, *ip);
11772 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11773 CHECK_TYPELOAD (klass);
11775 context_used = mini_class_check_context_used (cfg, klass);
11778 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11780 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11781 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11784 if (context_used) {
11785 MonoInst *klass_ins;
11787 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11788 klass, MONO_RGCTX_INFO_KLASS);
11791 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11792 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11794 mini_emit_class_check (cfg, klass_reg, klass);
11796 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11797 ins->type = STACK_MP;
11802 case CEE_MKREFANY: {
11803 MonoInst *loc, *addr;
11805 GSHAREDVT_FAILURE (*ip);
11808 MONO_INST_NEW (cfg, ins, *ip);
11811 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11812 CHECK_TYPELOAD (klass);
11814 context_used = mini_class_check_context_used (cfg, klass);
11816 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11817 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11819 if (context_used) {
11820 MonoInst *const_ins;
11821 int type_reg = alloc_preg (cfg);
11823 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11825 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11826 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11827 } else if (cfg->compile_aot) {
11828 int const_reg = alloc_preg (cfg);
11829 int type_reg = alloc_preg (cfg);
11831 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11834 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11836 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11837 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11841 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11842 ins->type = STACK_VTYPE;
11843 ins->klass = mono_defaults.typed_reference_class;
11848 case CEE_LDTOKEN: {
11850 MonoClass *handle_class;
11852 CHECK_STACK_OVF (1);
11855 n = read32 (ip + 1);
11857 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11858 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11859 handle = mono_method_get_wrapper_data (method, n);
11860 handle_class = mono_method_get_wrapper_data (method, n + 1);
11861 if (handle_class == mono_defaults.typehandle_class)
11862 handle = &((MonoClass*)handle)->byval_arg;
11865 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11870 mono_class_init (handle_class);
11871 if (cfg->generic_sharing_context) {
11872 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11873 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11874 /* This case handles ldtoken
11875 of an open type, like for
11878 } else if (handle_class == mono_defaults.typehandle_class) {
11879 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11880 } else if (handle_class == mono_defaults.fieldhandle_class)
11881 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11882 else if (handle_class == mono_defaults.methodhandle_class)
11883 context_used = mini_method_check_context_used (cfg, handle);
11885 g_assert_not_reached ();
11888 if ((cfg->opt & MONO_OPT_SHARED) &&
11889 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11890 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11891 MonoInst *addr, *vtvar, *iargs [3];
11892 int method_context_used;
11894 method_context_used = mini_method_check_context_used (cfg, method);
11896 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11898 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11899 EMIT_NEW_ICONST (cfg, iargs [1], n);
11900 if (method_context_used) {
11901 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11902 method, MONO_RGCTX_INFO_METHOD);
11903 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11905 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11906 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11908 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11912 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11914 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11915 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11916 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11917 (cmethod->klass == mono_defaults.systemtype_class) &&
11918 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11919 MonoClass *tclass = mono_class_from_mono_type (handle);
11921 mono_class_init (tclass);
11922 if (context_used) {
11923 ins = emit_get_rgctx_klass (cfg, context_used,
11924 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11925 } else if (cfg->compile_aot) {
11926 if (method->wrapper_type) {
11927 mono_error_init (&error); //got to do it since there are multiple conditionals below
11928 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11929 /* Special case for static synchronized wrappers */
11930 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11932 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11933 /* FIXME: n is not a normal token */
11935 EMIT_NEW_PCONST (cfg, ins, NULL);
11938 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11941 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11943 ins->type = STACK_OBJ;
11944 ins->klass = cmethod->klass;
11947 MonoInst *addr, *vtvar;
11949 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11951 if (context_used) {
11952 if (handle_class == mono_defaults.typehandle_class) {
11953 ins = emit_get_rgctx_klass (cfg, context_used,
11954 mono_class_from_mono_type (handle),
11955 MONO_RGCTX_INFO_TYPE);
11956 } else if (handle_class == mono_defaults.methodhandle_class) {
11957 ins = emit_get_rgctx_method (cfg, context_used,
11958 handle, MONO_RGCTX_INFO_METHOD);
11959 } else if (handle_class == mono_defaults.fieldhandle_class) {
11960 ins = emit_get_rgctx_field (cfg, context_used,
11961 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11963 g_assert_not_reached ();
11965 } else if (cfg->compile_aot) {
11966 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11968 EMIT_NEW_PCONST (cfg, ins, handle);
11970 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11971 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11972 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11982 MONO_INST_NEW (cfg, ins, OP_THROW);
11984 ins->sreg1 = sp [0]->dreg;
11986 bblock->out_of_line = TRUE;
11987 MONO_ADD_INS (bblock, ins);
11988 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11989 MONO_ADD_INS (bblock, ins);
11992 link_bblock (cfg, bblock, end_bblock);
11993 start_new_bblock = 1;
11995 case CEE_ENDFINALLY:
11996 /* mono_save_seq_point_info () depends on this */
11997 if (sp != stack_start)
11998 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11999 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12000 MONO_ADD_INS (bblock, ins);
12002 start_new_bblock = 1;
12005 * Control will leave the method so empty the stack, otherwise
12006 * the next basic block will start with a nonempty stack.
12008 while (sp != stack_start) {
12013 case CEE_LEAVE_S: {
12016 if (*ip == CEE_LEAVE) {
12018 target = ip + 5 + (gint32)read32(ip + 1);
12021 target = ip + 2 + (signed char)(ip [1]);
12024 /* empty the stack */
12025 while (sp != stack_start) {
12030 * If this leave statement is in a catch block, check for a
12031 * pending exception, and rethrow it if necessary.
12032 * We avoid doing this in runtime invoke wrappers, since those are called
12033 * by native code which excepts the wrapper to catch all exceptions.
12035 for (i = 0; i < header->num_clauses; ++i) {
12036 MonoExceptionClause *clause = &header->clauses [i];
12039 * Use <= in the final comparison to handle clauses with multiple
12040 * leave statements, like in bug #78024.
12041 * The ordering of the exception clauses guarantees that we find the
12042 * innermost clause.
12044 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12046 MonoBasicBlock *dont_throw;
12051 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12054 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12056 NEW_BBLOCK (cfg, dont_throw);
12059 * Currently, we always rethrow the abort exception, despite the
12060 * fact that this is not correct. See thread6.cs for an example.
12061 * But propagating the abort exception is more important than
12062 * getting the sematics right.
12064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12066 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12068 MONO_START_BB (cfg, dont_throw);
12073 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12075 MonoExceptionClause *clause;
12077 for (tmp = handlers; tmp; tmp = tmp->next) {
12078 clause = tmp->data;
12079 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12081 link_bblock (cfg, bblock, tblock);
12082 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12083 ins->inst_target_bb = tblock;
12084 ins->inst_eh_block = clause;
12085 MONO_ADD_INS (bblock, ins);
12086 bblock->has_call_handler = 1;
12087 if (COMPILE_LLVM (cfg)) {
12088 MonoBasicBlock *target_bb;
12091 * Link the finally bblock with the target, since it will
12092 * conceptually branch there.
12093 * FIXME: Have to link the bblock containing the endfinally.
12095 GET_BBLOCK (cfg, target_bb, target);
12096 link_bblock (cfg, tblock, target_bb);
12099 g_list_free (handlers);
12102 MONO_INST_NEW (cfg, ins, OP_BR);
12103 MONO_ADD_INS (bblock, ins);
12104 GET_BBLOCK (cfg, tblock, target);
12105 link_bblock (cfg, bblock, tblock);
12106 ins->inst_target_bb = tblock;
12107 start_new_bblock = 1;
12109 if (*ip == CEE_LEAVE)
12118 * Mono specific opcodes
12120 case MONO_CUSTOM_PREFIX: {
12122 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12126 case CEE_MONO_ICALL: {
12128 MonoJitICallInfo *info;
12130 token = read32 (ip + 2);
12131 func = mono_method_get_wrapper_data (method, token);
12132 info = mono_find_jit_icall_by_addr (func);
12134 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12137 CHECK_STACK (info->sig->param_count);
12138 sp -= info->sig->param_count;
12140 ins = mono_emit_jit_icall (cfg, info->func, sp);
12141 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12145 inline_costs += 10 * num_calls++;
12149 case CEE_MONO_LDPTR_CARD_TABLE: {
12151 gpointer card_mask;
12152 CHECK_STACK_OVF (1);
12154 if (cfg->compile_aot)
12155 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12157 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12161 inline_costs += 10 * num_calls++;
12164 case CEE_MONO_LDPTR_NURSERY_START: {
12167 CHECK_STACK_OVF (1);
12169 if (cfg->compile_aot)
12170 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12172 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12176 inline_costs += 10 * num_calls++;
12179 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12180 CHECK_STACK_OVF (1);
12182 if (cfg->compile_aot)
12183 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12185 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12189 inline_costs += 10 * num_calls++;
12192 case CEE_MONO_LDPTR: {
12195 CHECK_STACK_OVF (1);
12197 token = read32 (ip + 2);
12199 ptr = mono_method_get_wrapper_data (method, token);
12200 EMIT_NEW_PCONST (cfg, ins, ptr);
12203 inline_costs += 10 * num_calls++;
12204 /* Can't embed random pointers into AOT code */
12208 case CEE_MONO_JIT_ICALL_ADDR: {
12209 MonoJitICallInfo *callinfo;
12212 CHECK_STACK_OVF (1);
12214 token = read32 (ip + 2);
12216 ptr = mono_method_get_wrapper_data (method, token);
12217 callinfo = mono_find_jit_icall_by_addr (ptr);
12218 g_assert (callinfo);
12219 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12222 inline_costs += 10 * num_calls++;
12225 case CEE_MONO_ICALL_ADDR: {
12226 MonoMethod *cmethod;
12229 CHECK_STACK_OVF (1);
12231 token = read32 (ip + 2);
12233 cmethod = mono_method_get_wrapper_data (method, token);
12235 if (cfg->compile_aot) {
12236 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12238 ptr = mono_lookup_internal_call (cmethod);
12240 EMIT_NEW_PCONST (cfg, ins, ptr);
12246 case CEE_MONO_VTADDR: {
12247 MonoInst *src_var, *src;
12253 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12254 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12259 case CEE_MONO_NEWOBJ: {
12260 MonoInst *iargs [2];
12262 CHECK_STACK_OVF (1);
12264 token = read32 (ip + 2);
12265 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12266 mono_class_init (klass);
12267 NEW_DOMAINCONST (cfg, iargs [0]);
12268 MONO_ADD_INS (cfg->cbb, iargs [0]);
12269 NEW_CLASSCONST (cfg, iargs [1], klass);
12270 MONO_ADD_INS (cfg->cbb, iargs [1]);
12271 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12273 inline_costs += 10 * num_calls++;
12276 case CEE_MONO_OBJADDR:
12279 MONO_INST_NEW (cfg, ins, OP_MOVE);
12280 ins->dreg = alloc_ireg_mp (cfg);
12281 ins->sreg1 = sp [0]->dreg;
12282 ins->type = STACK_MP;
12283 MONO_ADD_INS (cfg->cbb, ins);
12287 case CEE_MONO_LDNATIVEOBJ:
12289 * Similar to LDOBJ, but instead load the unmanaged
12290 * representation of the vtype to the stack.
12295 token = read32 (ip + 2);
12296 klass = mono_method_get_wrapper_data (method, token);
12297 g_assert (klass->valuetype);
12298 mono_class_init (klass);
12301 MonoInst *src, *dest, *temp;
12304 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12305 temp->backend.is_pinvoke = 1;
12306 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12307 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12309 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12310 dest->type = STACK_VTYPE;
12311 dest->klass = klass;
12317 case CEE_MONO_RETOBJ: {
12319 * Same as RET, but return the native representation of a vtype
12322 g_assert (cfg->ret);
12323 g_assert (mono_method_signature (method)->pinvoke);
12328 token = read32 (ip + 2);
12329 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12331 if (!cfg->vret_addr) {
12332 g_assert (cfg->ret_var_is_local);
12334 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12336 EMIT_NEW_RETLOADA (cfg, ins);
12338 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12340 if (sp != stack_start)
12343 MONO_INST_NEW (cfg, ins, OP_BR);
12344 ins->inst_target_bb = end_bblock;
12345 MONO_ADD_INS (bblock, ins);
12346 link_bblock (cfg, bblock, end_bblock);
12347 start_new_bblock = 1;
12351 case CEE_MONO_CISINST:
12352 case CEE_MONO_CCASTCLASS: {
12357 token = read32 (ip + 2);
12358 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12359 if (ip [1] == CEE_MONO_CISINST)
12360 ins = handle_cisinst (cfg, klass, sp [0]);
12362 ins = handle_ccastclass (cfg, klass, sp [0]);
12368 case CEE_MONO_SAVE_LMF:
12369 case CEE_MONO_RESTORE_LMF:
12370 #ifdef MONO_ARCH_HAVE_LMF_OPS
12371 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12372 MONO_ADD_INS (bblock, ins);
12373 cfg->need_lmf_area = TRUE;
12377 case CEE_MONO_CLASSCONST:
12378 CHECK_STACK_OVF (1);
12380 token = read32 (ip + 2);
12381 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12384 inline_costs += 10 * num_calls++;
12386 case CEE_MONO_NOT_TAKEN:
12387 bblock->out_of_line = TRUE;
12390 case CEE_MONO_TLS: {
12393 CHECK_STACK_OVF (1);
12395 key = (gint32)read32 (ip + 2);
12396 g_assert (key < TLS_KEY_NUM);
12398 ins = mono_create_tls_get (cfg, key);
12400 if (cfg->compile_aot) {
12402 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12403 ins->dreg = alloc_preg (cfg);
12404 ins->type = STACK_PTR;
12406 g_assert_not_reached ();
12409 ins->type = STACK_PTR;
12410 MONO_ADD_INS (bblock, ins);
12415 case CEE_MONO_DYN_CALL: {
12416 MonoCallInst *call;
12418 /* It would be easier to call a trampoline, but that would put an
12419 * extra frame on the stack, confusing exception handling. So
12420 * implement it inline using an opcode for now.
12423 if (!cfg->dyn_call_var) {
12424 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12425 /* prevent it from being register allocated */
12426 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12429 /* Has to use a call inst since it local regalloc expects it */
12430 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12431 ins = (MonoInst*)call;
12433 ins->sreg1 = sp [0]->dreg;
12434 ins->sreg2 = sp [1]->dreg;
12435 MONO_ADD_INS (bblock, ins);
12437 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12440 inline_costs += 10 * num_calls++;
12444 case CEE_MONO_MEMORY_BARRIER: {
12446 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12450 case CEE_MONO_JIT_ATTACH: {
12451 MonoInst *args [16], *domain_ins;
12452 MonoInst *ad_ins, *jit_tls_ins;
12453 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12455 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12457 EMIT_NEW_PCONST (cfg, ins, NULL);
12458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12460 ad_ins = mono_get_domain_intrinsic (cfg);
12461 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12463 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12464 NEW_BBLOCK (cfg, next_bb);
12465 NEW_BBLOCK (cfg, call_bb);
12467 if (cfg->compile_aot) {
12468 /* AOT code is only used in the root domain */
12469 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12471 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12473 MONO_ADD_INS (cfg->cbb, ad_ins);
12474 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12477 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12479 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12481 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12482 MONO_START_BB (cfg, call_bb);
12485 if (cfg->compile_aot) {
12486 /* AOT code is only used in the root domain */
12487 EMIT_NEW_PCONST (cfg, args [0], NULL);
12489 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12491 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12492 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12495 MONO_START_BB (cfg, next_bb);
12501 case CEE_MONO_JIT_DETACH: {
12502 MonoInst *args [16];
12504 /* Restore the original domain */
12505 dreg = alloc_ireg (cfg);
12506 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12507 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12512 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12518 case CEE_PREFIX1: {
12521 case CEE_ARGLIST: {
12522 /* somewhat similar to LDTOKEN */
12523 MonoInst *addr, *vtvar;
12524 CHECK_STACK_OVF (1);
12525 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12527 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12528 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12530 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12531 ins->type = STACK_VTYPE;
12532 ins->klass = mono_defaults.argumenthandle_class;
12542 MonoInst *cmp, *arg1, *arg2;
12550 * The following transforms:
12551 * CEE_CEQ into OP_CEQ
12552 * CEE_CGT into OP_CGT
12553 * CEE_CGT_UN into OP_CGT_UN
12554 * CEE_CLT into OP_CLT
12555 * CEE_CLT_UN into OP_CLT_UN
12557 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12559 MONO_INST_NEW (cfg, ins, cmp->opcode);
12560 cmp->sreg1 = arg1->dreg;
12561 cmp->sreg2 = arg2->dreg;
12562 type_from_op (cfg, cmp, arg1, arg2);
12564 add_widen_op (cfg, cmp, &arg1, &arg2);
12565 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12566 cmp->opcode = OP_LCOMPARE;
12567 else if (arg1->type == STACK_R4)
12568 cmp->opcode = OP_RCOMPARE;
12569 else if (arg1->type == STACK_R8)
12570 cmp->opcode = OP_FCOMPARE;
12572 cmp->opcode = OP_ICOMPARE;
12573 MONO_ADD_INS (bblock, cmp);
12574 ins->type = STACK_I4;
12575 ins->dreg = alloc_dreg (cfg, ins->type);
12576 type_from_op (cfg, ins, arg1, arg2);
12578 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12580 * The backends expect the fceq opcodes to do the
12583 ins->sreg1 = cmp->sreg1;
12584 ins->sreg2 = cmp->sreg2;
12587 MONO_ADD_INS (bblock, ins);
12593 MonoInst *argconst;
12594 MonoMethod *cil_method;
12596 CHECK_STACK_OVF (1);
12598 n = read32 (ip + 2);
12599 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12600 if (!cmethod || mono_loader_get_last_error ())
12602 mono_class_init (cmethod->klass);
12604 mono_save_token_info (cfg, image, n, cmethod);
12606 context_used = mini_method_check_context_used (cfg, cmethod);
12608 cil_method = cmethod;
12609 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12610 METHOD_ACCESS_FAILURE (method, cil_method);
12612 if (mono_security_cas_enabled ()) {
12613 if (check_linkdemand (cfg, method, cmethod))
12614 INLINE_FAILURE ("linkdemand");
12615 CHECK_CFG_EXCEPTION;
12616 } else if (mono_security_core_clr_enabled ()) {
12617 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12621 * Optimize the common case of ldftn+delegate creation
12623 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12624 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12625 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12626 MonoInst *target_ins, *handle_ins;
12627 MonoMethod *invoke;
12628 int invoke_context_used;
12630 invoke = mono_get_delegate_invoke (ctor_method->klass);
12631 if (!invoke || !mono_method_signature (invoke))
12634 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12636 target_ins = sp [-1];
12638 if (mono_security_core_clr_enabled ())
12639 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12641 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12642 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12643 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12645 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12649 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12650 /* FIXME: SGEN support */
12651 if (invoke_context_used == 0) {
12653 if (cfg->verbose_level > 3)
12654 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12655 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12658 CHECK_CFG_EXCEPTION;
12669 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12670 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12674 inline_costs += 10 * num_calls++;
12677 case CEE_LDVIRTFTN: {
12678 MonoInst *args [2];
12682 n = read32 (ip + 2);
12683 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12684 if (!cmethod || mono_loader_get_last_error ())
12686 mono_class_init (cmethod->klass);
12688 context_used = mini_method_check_context_used (cfg, cmethod);
12690 if (mono_security_cas_enabled ()) {
12691 if (check_linkdemand (cfg, method, cmethod))
12692 INLINE_FAILURE ("linkdemand");
12693 CHECK_CFG_EXCEPTION;
12694 } else if (mono_security_core_clr_enabled ()) {
12695 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12699 * Optimize the common case of ldvirtftn+delegate creation
12701 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12702 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12703 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12704 MonoInst *target_ins, *handle_ins;
12705 MonoMethod *invoke;
12706 int invoke_context_used;
12708 invoke = mono_get_delegate_invoke (ctor_method->klass);
12709 if (!invoke || !mono_method_signature (invoke))
12712 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12714 target_ins = sp [-1];
12716 if (mono_security_core_clr_enabled ())
12717 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12719 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12720 /* FIXME: SGEN support */
12721 if (invoke_context_used == 0) {
12723 if (cfg->verbose_level > 3)
12724 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12725 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12728 CHECK_CFG_EXCEPTION;
12742 args [1] = emit_get_rgctx_method (cfg, context_used,
12743 cmethod, MONO_RGCTX_INFO_METHOD);
12746 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12748 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12751 inline_costs += 10 * num_calls++;
12755 CHECK_STACK_OVF (1);
12757 n = read16 (ip + 2);
12759 EMIT_NEW_ARGLOAD (cfg, ins, n);
12764 CHECK_STACK_OVF (1);
12766 n = read16 (ip + 2);
12768 NEW_ARGLOADA (cfg, ins, n);
12769 MONO_ADD_INS (cfg->cbb, ins);
12777 n = read16 (ip + 2);
12779 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12781 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12785 CHECK_STACK_OVF (1);
12787 n = read16 (ip + 2);
12789 EMIT_NEW_LOCLOAD (cfg, ins, n);
12794 unsigned char *tmp_ip;
12795 CHECK_STACK_OVF (1);
12797 n = read16 (ip + 2);
12800 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12806 EMIT_NEW_LOCLOADA (cfg, ins, n);
12815 n = read16 (ip + 2);
12817 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12819 emit_stloc_ir (cfg, sp, header, n);
12826 if (sp != stack_start)
12828 if (cfg->method != method)
12830 * Inlining this into a loop in a parent could lead to
12831 * stack overflows which is different behavior than the
12832 * non-inlined case, thus disable inlining in this case.
12834 INLINE_FAILURE("localloc");
12836 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12837 ins->dreg = alloc_preg (cfg);
12838 ins->sreg1 = sp [0]->dreg;
12839 ins->type = STACK_PTR;
12840 MONO_ADD_INS (cfg->cbb, ins);
12842 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12844 ins->flags |= MONO_INST_INIT;
12849 case CEE_ENDFILTER: {
12850 MonoExceptionClause *clause, *nearest;
12855 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12857 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12858 ins->sreg1 = (*sp)->dreg;
12859 MONO_ADD_INS (bblock, ins);
12860 start_new_bblock = 1;
12864 for (cc = 0; cc < header->num_clauses; ++cc) {
12865 clause = &header->clauses [cc];
12866 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12867 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12868 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12871 g_assert (nearest);
12872 if ((ip - header->code) != nearest->handler_offset)
12877 case CEE_UNALIGNED_:
12878 ins_flag |= MONO_INST_UNALIGNED;
12879 /* FIXME: record alignment? we can assume 1 for now */
12883 case CEE_VOLATILE_:
12884 ins_flag |= MONO_INST_VOLATILE;
12888 ins_flag |= MONO_INST_TAILCALL;
12889 cfg->flags |= MONO_CFG_HAS_TAIL;
12890 /* Can't inline tail calls at this time */
12891 inline_costs += 100000;
12898 token = read32 (ip + 2);
12899 klass = mini_get_class (method, token, generic_context);
12900 CHECK_TYPELOAD (klass);
12901 if (generic_class_is_reference_type (cfg, klass))
12902 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12904 mini_emit_initobj (cfg, *sp, NULL, klass);
12908 case CEE_CONSTRAINED_:
12910 token = read32 (ip + 2);
12911 constrained_class = mini_get_class (method, token, generic_context);
12912 CHECK_TYPELOAD (constrained_class);
12916 case CEE_INITBLK: {
12917 MonoInst *iargs [3];
12921 /* Skip optimized paths for volatile operations. */
12922 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12923 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12924 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12925 /* emit_memset only works when val == 0 */
12926 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12929 iargs [0] = sp [0];
12930 iargs [1] = sp [1];
12931 iargs [2] = sp [2];
12932 if (ip [1] == CEE_CPBLK) {
12934 * FIXME: It's unclear whether we should be emitting both the acquire
12935 * and release barriers for cpblk. It is technically both a load and
12936 * store operation, so it seems like that's the sensible thing to do.
12938 * FIXME: We emit full barriers on both sides of the operation for
12939 * simplicity. We should have a separate atomic memcpy method instead.
12941 MonoMethod *memcpy_method = get_memcpy_method ();
12943 if (ins_flag & MONO_INST_VOLATILE)
12944 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12946 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12947 call->flags |= ins_flag;
12949 if (ins_flag & MONO_INST_VOLATILE)
12950 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12952 MonoMethod *memset_method = get_memset_method ();
12953 if (ins_flag & MONO_INST_VOLATILE) {
12954 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12955 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12957 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12958 call->flags |= ins_flag;
12969 ins_flag |= MONO_INST_NOTYPECHECK;
12971 ins_flag |= MONO_INST_NORANGECHECK;
12972 /* we ignore the no-nullcheck for now since we
12973 * really do it explicitly only when doing callvirt->call
12977 case CEE_RETHROW: {
12979 int handler_offset = -1;
12981 for (i = 0; i < header->num_clauses; ++i) {
12982 MonoExceptionClause *clause = &header->clauses [i];
12983 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12984 handler_offset = clause->handler_offset;
12989 bblock->flags |= BB_EXCEPTION_UNSAFE;
12991 if (handler_offset == -1)
12994 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12995 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12996 ins->sreg1 = load->dreg;
12997 MONO_ADD_INS (bblock, ins);
12999 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13000 MONO_ADD_INS (bblock, ins);
13003 link_bblock (cfg, bblock, end_bblock);
13004 start_new_bblock = 1;
13012 CHECK_STACK_OVF (1);
13014 token = read32 (ip + 2);
13015 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13016 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13019 val = mono_type_size (type, &ialign);
13021 MonoClass *klass = mini_get_class (method, token, generic_context);
13022 CHECK_TYPELOAD (klass);
13024 val = mono_type_size (&klass->byval_arg, &ialign);
13026 if (mini_is_gsharedvt_klass (cfg, klass))
13027 GSHAREDVT_FAILURE (*ip);
13029 EMIT_NEW_ICONST (cfg, ins, val);
13034 case CEE_REFANYTYPE: {
13035 MonoInst *src_var, *src;
13037 GSHAREDVT_FAILURE (*ip);
13043 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13045 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13046 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13047 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13052 case CEE_READONLY_:
13065 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13075 g_warning ("opcode 0x%02x not handled", *ip);
13079 if (start_new_bblock != 1)
13082 bblock->cil_length = ip - bblock->cil_code;
13083 if (bblock->next_bb) {
13084 /* This could already be set because of inlining, #693905 */
13085 MonoBasicBlock *bb = bblock;
13087 while (bb->next_bb)
13089 bb->next_bb = end_bblock;
13091 bblock->next_bb = end_bblock;
13094 if (cfg->method == method && cfg->domainvar) {
13096 MonoInst *get_domain;
13098 cfg->cbb = init_localsbb;
13100 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13101 MONO_ADD_INS (cfg->cbb, get_domain);
13103 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13105 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13106 MONO_ADD_INS (cfg->cbb, store);
13109 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13110 if (cfg->compile_aot)
13111 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13112 mono_get_got_var (cfg);
13115 if (cfg->method == method && cfg->got_var)
13116 mono_emit_load_got_addr (cfg);
13118 if (init_localsbb) {
13119 cfg->cbb = init_localsbb;
13121 for (i = 0; i < header->num_locals; ++i) {
13122 emit_init_local (cfg, i, header->locals [i], init_locals);
13126 if (cfg->init_ref_vars && cfg->method == method) {
13127 /* Emit initialization for ref vars */
13128 // FIXME: Avoid duplication initialization for IL locals.
13129 for (i = 0; i < cfg->num_varinfo; ++i) {
13130 MonoInst *ins = cfg->varinfo [i];
13132 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13133 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13137 if (cfg->lmf_var && cfg->method == method) {
13138 cfg->cbb = init_localsbb;
13139 emit_push_lmf (cfg);
13142 cfg->cbb = init_localsbb;
13143 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13146 MonoBasicBlock *bb;
13149 * Make seq points at backward branch targets interruptable.
13151 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13152 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13153 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13156 /* Add a sequence point for method entry/exit events */
13157 if (seq_points && cfg->gen_sdb_seq_points) {
13158 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13159 MONO_ADD_INS (init_localsbb, ins);
13160 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13161 MONO_ADD_INS (cfg->bb_exit, ins);
13165 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13166 * the code they refer to was dead (#11880).
13168 if (sym_seq_points) {
13169 for (i = 0; i < header->code_size; ++i) {
13170 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13173 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13174 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13181 if (cfg->method == method) {
13182 MonoBasicBlock *bb;
13183 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13184 bb->region = mono_find_block_region (cfg, bb->real_offset);
13186 mono_create_spvar_for_region (cfg, bb->region);
13187 if (cfg->verbose_level > 2)
13188 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13192 if (inline_costs < 0) {
13195 /* Method is too large */
13196 mname = mono_method_full_name (method, TRUE);
13197 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13198 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13202 if ((cfg->verbose_level > 2) && (cfg->method == method))
13203 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13208 g_assert (!mono_error_ok (&cfg->error));
13212 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13216 set_exception_type_from_invalid_il (cfg, method, ip);
13220 g_slist_free (class_inits);
13221 mono_basic_block_free (original_bb);
13222 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13223 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13224 if (cfg->exception_type)
13227 return inline_costs;
13231 store_membase_reg_to_store_membase_imm (int opcode)
13234 case OP_STORE_MEMBASE_REG:
13235 return OP_STORE_MEMBASE_IMM;
13236 case OP_STOREI1_MEMBASE_REG:
13237 return OP_STOREI1_MEMBASE_IMM;
13238 case OP_STOREI2_MEMBASE_REG:
13239 return OP_STOREI2_MEMBASE_IMM;
13240 case OP_STOREI4_MEMBASE_REG:
13241 return OP_STOREI4_MEMBASE_IMM;
13242 case OP_STOREI8_MEMBASE_REG:
13243 return OP_STOREI8_MEMBASE_IMM;
13245 g_assert_not_reached ();
13252 mono_op_to_op_imm (int opcode)
13256 return OP_IADD_IMM;
13258 return OP_ISUB_IMM;
13260 return OP_IDIV_IMM;
13262 return OP_IDIV_UN_IMM;
13264 return OP_IREM_IMM;
13266 return OP_IREM_UN_IMM;
13268 return OP_IMUL_IMM;
13270 return OP_IAND_IMM;
13274 return OP_IXOR_IMM;
13276 return OP_ISHL_IMM;
13278 return OP_ISHR_IMM;
13280 return OP_ISHR_UN_IMM;
13283 return OP_LADD_IMM;
13285 return OP_LSUB_IMM;
13287 return OP_LAND_IMM;
13291 return OP_LXOR_IMM;
13293 return OP_LSHL_IMM;
13295 return OP_LSHR_IMM;
13297 return OP_LSHR_UN_IMM;
13298 #if SIZEOF_REGISTER == 8
13300 return OP_LREM_IMM;
13304 return OP_COMPARE_IMM;
13306 return OP_ICOMPARE_IMM;
13308 return OP_LCOMPARE_IMM;
13310 case OP_STORE_MEMBASE_REG:
13311 return OP_STORE_MEMBASE_IMM;
13312 case OP_STOREI1_MEMBASE_REG:
13313 return OP_STOREI1_MEMBASE_IMM;
13314 case OP_STOREI2_MEMBASE_REG:
13315 return OP_STOREI2_MEMBASE_IMM;
13316 case OP_STOREI4_MEMBASE_REG:
13317 return OP_STOREI4_MEMBASE_IMM;
13319 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13321 return OP_X86_PUSH_IMM;
13322 case OP_X86_COMPARE_MEMBASE_REG:
13323 return OP_X86_COMPARE_MEMBASE_IMM;
13325 #if defined(TARGET_AMD64)
13326 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13327 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13329 case OP_VOIDCALL_REG:
13330 return OP_VOIDCALL;
13338 return OP_LOCALLOC_IMM;
13345 ldind_to_load_membase (int opcode)
13349 return OP_LOADI1_MEMBASE;
13351 return OP_LOADU1_MEMBASE;
13353 return OP_LOADI2_MEMBASE;
13355 return OP_LOADU2_MEMBASE;
13357 return OP_LOADI4_MEMBASE;
13359 return OP_LOADU4_MEMBASE;
13361 return OP_LOAD_MEMBASE;
13362 case CEE_LDIND_REF:
13363 return OP_LOAD_MEMBASE;
13365 return OP_LOADI8_MEMBASE;
13367 return OP_LOADR4_MEMBASE;
13369 return OP_LOADR8_MEMBASE;
13371 g_assert_not_reached ();
13378 stind_to_store_membase (int opcode)
13382 return OP_STOREI1_MEMBASE_REG;
13384 return OP_STOREI2_MEMBASE_REG;
13386 return OP_STOREI4_MEMBASE_REG;
13388 case CEE_STIND_REF:
13389 return OP_STORE_MEMBASE_REG;
13391 return OP_STOREI8_MEMBASE_REG;
13393 return OP_STORER4_MEMBASE_REG;
13395 return OP_STORER8_MEMBASE_REG;
13397 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-membase opcode to the variant which loads from an
 * absolute address (OP_*_MEM), or return -1 if the target architecture
 * does not support such loads.  Only x86/amd64 have these opcodes.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored by STORE_OPCODE,
 * return the x86/amd64 read-modify-write opcode which operates directly on
 * the destination memory location, or -1 if the pair cannot be fused.
 * OP_MOVE becomes OP_NOP since the store itself already performs the move.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only 32 bit (pointer sized) stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Given a compare-result opcode whose output is immediately stored by
 * STORE_OPCODE, return the x86/amd64 setcc-to-memory opcode which writes
 * the flag directly to the destination, or -1 if no fusion is possible.
 * Only byte-sized stores (OP_STOREI1_MEMBASE_REG) can be fused since the
 * setcc instructions write a single byte.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* Explicit break: the original implicit fallthrough into OP_CNE was
		 * harmless (same guard), but it trips -Wimplicit-fallthrough. */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source comes directly from a load performed
 * by LOAD_OPCODE, return the x86/amd64 opcode which reads that operand from
 * memory instead of a register, or -1 if the pair cannot be fused.
 *
 * NOTE(review): reconstructed from a mangled excerpt (comment delimiters and
 * #else/#endif lines were dropped) — verify against upstream method-to-ir.c.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		/* Under ILP32 a plain OP_LOAD_MEMBASE is 32 bits, too small for push */
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Given an opcode whose second source comes directly from a load performed
 * by LOAD_OPCODE, return the x86/amd64 reg-op-membase opcode which reads
 * that operand from memory, or -1 if the pair cannot be fused.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	/* Under ILP32 a plain OP_LOAD_MEMBASE is 32 bits wide */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which the current
 * architecture emulates in software (long shifts on 32 bit targets without
 * native long shift ops, and mul/div/rem when the arch emulates them),
 * since the emulation paths have no immediate variants.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
13711 * mono_handle_global_vregs:
13713 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13717 mono_handle_global_vregs (MonoCompile *cfg)
13719 gint32 *vreg_to_bb;
13720 MonoBasicBlock *bb;
13723 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13725 #ifdef MONO_ARCH_SIMD_INTRINSICS
13726 if (cfg->uses_simd_intrinsics)
13727 mono_simd_simplify_indirection (cfg);
13730 /* Find local vregs used in more than one bb */
13731 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13732 MonoInst *ins = bb->code;
13733 int block_num = bb->block_num;
13735 if (cfg->verbose_level > 2)
13736 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13739 for (; ins; ins = ins->next) {
13740 const char *spec = INS_INFO (ins->opcode);
13741 int regtype = 0, regindex;
13744 if (G_UNLIKELY (cfg->verbose_level > 2))
13745 mono_print_ins (ins);
13747 g_assert (ins->opcode >= MONO_CEE_LAST);
13749 for (regindex = 0; regindex < 4; regindex ++) {
13752 if (regindex == 0) {
13753 regtype = spec [MONO_INST_DEST];
13754 if (regtype == ' ')
13757 } else if (regindex == 1) {
13758 regtype = spec [MONO_INST_SRC1];
13759 if (regtype == ' ')
13762 } else if (regindex == 2) {
13763 regtype = spec [MONO_INST_SRC2];
13764 if (regtype == ' ')
13767 } else if (regindex == 3) {
13768 regtype = spec [MONO_INST_SRC3];
13769 if (regtype == ' ')
13774 #if SIZEOF_REGISTER == 4
13775 /* In the LLVM case, the long opcodes are not decomposed */
13776 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13778 * Since some instructions reference the original long vreg,
13779 * and some reference the two component vregs, it is quite hard
13780 * to determine when it needs to be global. So be conservative.
13782 if (!get_vreg_to_inst (cfg, vreg)) {
13783 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13785 if (cfg->verbose_level > 2)
13786 printf ("LONG VREG R%d made global.\n", vreg);
13790 * Make the component vregs volatile since the optimizations can
13791 * get confused otherwise.
13793 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13794 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13798 g_assert (vreg != -1);
13800 prev_bb = vreg_to_bb [vreg];
13801 if (prev_bb == 0) {
13802 /* 0 is a valid block num */
13803 vreg_to_bb [vreg] = block_num + 1;
13804 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13805 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13808 if (!get_vreg_to_inst (cfg, vreg)) {
13809 if (G_UNLIKELY (cfg->verbose_level > 2))
13810 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13814 if (vreg_is_ref (cfg, vreg))
13815 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13817 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13820 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13823 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13826 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13829 g_assert_not_reached ();
13833 /* Flag as having been used in more than one bb */
13834 vreg_to_bb [vreg] = -1;
13840 /* If a variable is used in only one bblock, convert it into a local vreg */
13841 for (i = 0; i < cfg->num_varinfo; i++) {
13842 MonoInst *var = cfg->varinfo [i];
13843 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13845 switch (var->type) {
13851 #if SIZEOF_REGISTER == 8
13854 #if !defined(TARGET_X86)
13855 /* Enabling this screws up the fp stack on x86 */
13858 if (mono_arch_is_soft_float ())
13861 /* Arguments are implicitly global */
13862 /* Putting R4 vars into registers doesn't work currently */
13863 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13864 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13866 * Make that the variable's liveness interval doesn't contain a call, since
13867 * that would cause the lvreg to be spilled, making the whole optimization
13870 /* This is too slow for JIT compilation */
13872 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13874 int def_index, call_index, ins_index;
13875 gboolean spilled = FALSE;
13880 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13881 const char *spec = INS_INFO (ins->opcode);
13883 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13884 def_index = ins_index;
13886 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13887 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13888 if (call_index > def_index) {
13894 if (MONO_IS_CALL (ins))
13895 call_index = ins_index;
13905 if (G_UNLIKELY (cfg->verbose_level > 2))
13906 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13907 var->flags |= MONO_INST_IS_DEAD;
13908 cfg->vreg_to_inst [var->dreg] = NULL;
13915 * Compress the varinfo and vars tables so the liveness computation is faster and
13916 * takes up less space.
13919 for (i = 0; i < cfg->num_varinfo; ++i) {
13920 MonoInst *var = cfg->varinfo [i];
13921 if (pos < i && cfg->locals_start == i)
13922 cfg->locals_start = pos;
13923 if (!(var->flags & MONO_INST_IS_DEAD)) {
13925 cfg->varinfo [pos] = cfg->varinfo [i];
13926 cfg->varinfo [pos]->inst_c0 = pos;
13927 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13928 cfg->vars [pos].idx = pos;
13929 #if SIZEOF_REGISTER == 4
13930 if (cfg->varinfo [pos]->type == STACK_I8) {
13931 /* Modify the two component vars too */
13934 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13935 var1->inst_c0 = pos;
13936 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13937 var1->inst_c0 = pos;
13944 cfg->num_varinfo = pos;
13945 if (cfg->locals_start > cfg->num_varinfo)
13946 cfg->locals_start = cfg->num_varinfo;
/*
 * mono_spill_global_vars:
 *
 * Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
MonoBasicBlock *bb;
int orig_next_vreg;
guint32 *vreg_to_lvreg;
/* NOTE(review): lvregs_len is read below (vreg_to_lvreg clearing loop) but its
 * initialization (presumably lvregs_len = 0) is not visible in this listing —
 * confirm it is initialized before the first bblock is processed. */
guint32 i, lvregs_len;
gboolean dest_has_lvreg = FALSE;
/* Maps an INS_INFO spec character ('i'/'l'/'f'/'x') to a stack type */
guint32 stacktypes [128];
MonoInst **live_range_start, **live_range_end;
MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
int *gsharedvt_vreg_to_idx = NULL;
*need_local_opts = FALSE;
/* spec2 is a scratch spec string used for store opcodes whose dreg is really a
 * source (base register); presumably declared at an outer scope — not visible here. */
memset (spec2, 0, sizeof (spec2));
/* FIXME: Move this function to mini.c */
stacktypes ['i'] = STACK_PTR;
stacktypes ['l'] = STACK_I8;
stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
stacktypes ['x'] = STACK_VTYPE;
#if SIZEOF_REGISTER == 4
/* Create MonoInsts for longs */
/* On 32 bit, a 64 bit variable is represented by two stacked 32 bit slots */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
switch (ins->type) {
if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
g_assert (ins->opcode == OP_REGOFFSET);
/* Give the component vregs REGOFFSETs into the ls/ms words of the slot */
tree = get_vreg_to_inst (cfg, ins->dreg + 1);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
tree = get_vreg_to_inst (cfg, ins->dreg + 2);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
if (cfg->compute_gc_maps) {
/* registers need liveness info even for !non refs */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
if (ins->opcode == OP_REGVAR)
ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, map variable-sized locals to runtime-info slot indexes */
if (cfg->gsharedvt) {
gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
if (i >= cfg->locals_start) {
/* Local: offset is looked up at runtime via the info slot (idx + 1 so 0 means 'none') */
idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
ins->opcode = OP_GSHAREDVT_LOCAL;
ins->inst_imm = idx;
/* Argument: passed by ref; -1 marks it as such */
gsharedvt_vreg_to_idx [ins->dreg] = -1;
ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
/* FIXME: widening and truncation */
/*
 * As an optimization, when a variable allocated to the stack is first loaded into
 * an lvreg, we will remember the lvreg and use it the next time instead of loading
 * the variable again.
 */
orig_next_vreg = cfg->next_vreg;
vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
/*
 * These arrays contain the first and last instructions accessing a given
 * variable.
 * Since we emit bblocks in the same order we process them here, and we
 * don't split live ranges, these will precisely describe the live range of
 * the variable, i.e. the instruction range where a valid value can be found
 * in the variables location.
 * The live range is computed using the liveness info computed by the liveness pass.
 * We can't use vmv->range, since that is an abstract live range, and we need
 * one which is instruction precise.
 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
 */
/* FIXME: Only do this if debugging info is requested */
live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
/* Add spill loads/stores */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (cfg->verbose_level > 2)
printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Clear vreg_to_lvreg array */
/* Cached lvregs are only valid within one bblock */
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
MONO_BB_FOR_EACH_INS (bb, ins) {
const char *spec = INS_INFO (ins->opcode);
int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
gboolean store, no_lvreg;
int sregs [MONO_MAX_SRC_REGS];
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
if (ins->opcode == OP_NOP)
/*
 * We handle LDADDR here as well, since it can only be decomposed
 * when variable addresses are known.
 */
if (ins->opcode == OP_LDADDR) {
MonoInst *var = ins->inst_p0;
if (var->opcode == OP_VTARG_ADDR) {
/* Happens on SPARC/S390 where vtypes are passed by reference */
MonoInst *vtaddr = var->inst_left;
if (vtaddr->opcode == OP_REGVAR) {
ins->opcode = OP_MOVE;
ins->sreg1 = vtaddr->dreg;
else if (var->inst_left->opcode == OP_REGOFFSET) {
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = vtaddr->inst_basereg;
ins->inst_offset = vtaddr->inst_offset;
} else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
/* gsharedvt arg passed by ref */
g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
} else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
MonoInst *load, *load2, *load3;
int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
int reg1, reg2, reg3;
MonoInst *info_var = cfg->gsharedvt_info_var;
MonoInst *locals_var = cfg->gsharedvt_locals_var;
/*
 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
 */
g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
g_assert (info_var);
g_assert (locals_var);
/* Mark the instruction used to compute the locals var as used */
cfg->gsharedvt_locals_var_ins = NULL;
/* Load the offset */
if (info_var->opcode == OP_REGOFFSET) {
reg1 = alloc_ireg (cfg);
NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
} else if (info_var->opcode == OP_REGVAR) {
reg1 = info_var->dreg;
g_assert_not_reached ();
reg2 = alloc_ireg (cfg);
NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Load the locals area address */
reg3 = alloc_ireg (cfg);
if (locals_var->opcode == OP_REGOFFSET) {
NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
} else if (locals_var->opcode == OP_REGVAR) {
NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
g_assert_not_reached ();
/* Compute the address */
ins->opcode = OP_PADD;
/* Insert the three loads in order before the OP_PADD */
mono_bblock_insert_before_ins (bb, ins, load3);
mono_bblock_insert_before_ins (bb, load3, load2);
mono_bblock_insert_before_ins (bb, load2, load);
g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: address is basereg + offset */
ins->opcode = OP_ADD_IMM;
ins->sreg1 = var->inst_basereg;
ins->inst_imm = var->inst_offset;
*need_local_opts = TRUE;
spec = INS_INFO (ins->opcode);
/* High-level (CEE) opcodes must not survive to this pass */
if (ins->opcode < MONO_CEE_LAST) {
mono_print_ins (ins);
g_assert_not_reached ();
/*
 * Store opcodes have destbasereg in the dreg, but in reality, it is an
 * input (the base register of the store). Swap dreg/sreg2 and use the
 * spec2 scratch spec so the generic code below treats it as a source.
 */
if (MONO_IS_STORE_MEMBASE (ins)) {
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
spec2 [MONO_INST_SRC3] = ' ';
} else if (MONO_IS_STORE_MEMINDEX (ins))
g_assert_not_reached ();
if (G_UNLIKELY (cfg->verbose_level > 2)) {
printf ("\t %.3s %d", spec, ins->dreg);
num_sregs = mono_inst_get_src_registers (ins, sregs);
for (srcindex = 0; srcindex < num_sregs; ++srcindex)
printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
regtype = spec [MONO_INST_DEST];
g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
MonoInst *store_ins;
MonoInst *def_ins = ins;
int dreg = ins->dreg; /* The original vreg */
store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
if (var->opcode == OP_REGVAR) {
/* Variable lives in a hard register: just substitute it */
ins->dreg = var->dreg;
} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
 * Instead of emitting a load+store, use a _membase opcode.
 */
g_assert (var->opcode == OP_REGOFFSET);
if (ins->opcode == OP_MOVE) {
ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
g_assert (var->opcode == OP_REGOFFSET);
prev_dreg = ins->dreg;
/* Invalidate any previous lvreg for this vreg */
vreg_to_lvreg [ins->dreg] = 0;
if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float stores R8 values through integer registers */
store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; a spill store follows the instruction */
ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
/* 64 bit value on 32 bit: store the two component vregs separately */
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
mono_bblock_insert_after_ins (bb, ins, store_ins);
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
g_assert (store_opcode != OP_STOREV_MEMBASE);
/* Try to fuse the store into the instruction itself */
/* FIXME: Add more instructions */
if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
ins->inst_imm = ins->inst_c0;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* Turn the move into a direct store of its source */
ins->opcode = store_opcode;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
/* The instruction is now a store: swap dreg/sreg2 and switch to spec2 */
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
spec2 [MONO_INST_SRC3] = ' ';
} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
// FIXME: The backends expect the base reg to be in inst_basereg
ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
/* printf ("INS: "); mono_print_ins (ins); */
/* Create a store instruction */
NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
/* Insert it after the instruction */
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
/*
 * We can't assign ins->dreg to var->dreg here, since the
 * sregs could use it. So set a flag, and do it after
 * the sregs have been processed.
 */
if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
dest_has_lvreg = TRUE;
/* Record the start of the variable's precise live range */
if (def_ins && !live_range_start [dreg]) {
live_range_start [dreg] = def_ins;
live_range_start_bb [dreg] = bb;
if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
tmp->inst_c1 = dreg;
mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
num_sregs = mono_inst_get_src_registers (ins, sregs);
/* NOTE(review): this loops to 3 while mono_inst_get_src_registers only fills
 * num_sregs entries of sregs[]; entries past num_sregs may be uninitialized.
 * Consider bounding by num_sregs — confirm against the rest of the pass. */
for (srcindex = 0; srcindex < 3; ++srcindex) {
regtype = spec [MONO_INST_SRC1 + srcindex];
sreg = sregs [srcindex];
g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
MonoInst *var = get_vreg_to_inst (cfg, sreg);
MonoInst *use_ins = ins;
MonoInst *load_ins;
guint32 load_opcode;
if (var->opcode == OP_REGVAR) {
/* Variable lives in a hard register: substitute and extend its live range */
sregs [srcindex] = var->dreg;
//mono_inst_set_src_registers (ins, sregs);
live_range_end [sreg] = use_ins;
live_range_end_bb [sreg] = bb;
if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
/* var->dreg is a hreg */
tmp->inst_c1 = sreg;
mono_bblock_insert_after_ins (bb, ins, tmp);
g_assert (var->opcode == OP_REGOFFSET);
load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
g_assert (load_opcode != OP_LOADV_MEMBASE);
if (vreg_to_lvreg [sreg]) {
g_assert (vreg_to_lvreg [sreg] != -1);
/* The variable is already loaded to an lvreg */
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
sregs [srcindex] = vreg_to_lvreg [sreg];
//mono_inst_set_src_registers (ins, sregs);
/* Try to fuse the load into the instruction */
if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
sregs [0] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
sregs [1] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
if (MONO_IS_REAL_MOVE (ins)) {
ins->opcode = OP_NOP;
//printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg before the instruction */
sreg = alloc_dreg (cfg, stacktypes [regtype]);
if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
if (var->dreg == prev_dreg) {
/*
 * sreg refers to the value loaded by the load
 * emitted below, but we need to use ins->dreg
 * since it refers to the store emitted earlier.
 */
g_assert (sreg != -1);
/* Cache the lvreg so later uses in this bblock skip the reload */
vreg_to_lvreg [var->dreg] = sreg;
g_assert (lvregs_len < 1024);
lvregs [lvregs_len ++] = var->dreg;
sregs [srcindex] = sreg;
//mono_inst_set_src_registers (ins, sregs);
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
/* 64 bit value on 32 bit: load the two component vregs separately */
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
#if SIZEOF_REGISTER == 4
g_assert (load_opcode != OP_LOADI8_MEMBASE);
NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
/* Extend the variable's precise live range to this use */
if (var->dreg < orig_next_vreg) {
live_range_end [var->dreg] = use_ins;
live_range_end_bb [var->dreg] = bb;
if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
tmp->inst_c1 = var->dreg;
mono_bblock_insert_after_ins (bb, ins, tmp);
mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now that sregs are done, cache the dest lvreg */
if (dest_has_lvreg) {
g_assert (ins->dreg != -1);
vreg_to_lvreg [prev_dreg] = ins->dreg;
g_assert (lvregs_len < 1024);
lvregs [lvregs_len ++] = prev_dreg;
dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes */
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
if (MONO_IS_CALL (ins)) {
/* Clear vreg_to_lvreg array */
/* Calls clobber the lvregs, so cached values are invalid afterwards */
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
} else if (ins->opcode == OP_NOP) {
MONO_INST_NULLIFY_SREGS (ins);
if (cfg->verbose_level > 2)
mono_print_ins_index (1, ins);
/* Extend the live range based on the liveness info */
if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
for (i = 0; i < cfg->num_varinfo; i ++) {
MonoMethodVar *vi = MONO_VARINFO (cfg, i);
if (vreg_is_volatile (cfg, vi->vreg))
/* The liveness info is incomplete */
if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
/* Live from at least the first ins of this bb */
live_range_start [vi->vreg] = bb->code;
live_range_start_bb [vi->vreg] = bb;
if (mono_bitset_test_fast (bb->live_out_set, i)) {
/* Live at least until the last ins of this bb */
live_range_end [vi->vreg] = bb->last_ins;
live_range_end_bb [vi->vreg] = bb;
#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
/*
 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
 * by storing the current native offset into MonoMethodVar->live_range_start/end.
 */
if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
for (i = 0; i < cfg->num_varinfo; ++i) {
int vreg = MONO_VARINFO (cfg, i)->vreg;
if (live_range_start [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
ins->inst_c1 = vreg;
mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
if (live_range_end [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
ins->inst_c1 = vreg;
if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
mono_add_ins_to_end (live_range_end_bb [vreg], ins);
mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
if (cfg->gsharedvt_locals_var_ins) {
/* Nullify if unused */
cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
cfg->gsharedvt_locals_var_ins->inst_imm = 0;
g_free (live_range_start);
g_free (live_range_end);
g_free (live_range_start_bb);
g_free (live_range_end_bb);
14583 * - use 'iadd' instead of 'int_add'
14584 * - handling ovf opcodes: decompose in method_to_ir.
14585 * - unify iregs/fregs
14586 * -> partly done, the missing parts are:
14587 * - a more complete unification would involve unifying the hregs as well, so
14588 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14589 * would no longer map to the machine hregs, so the code generators would need to
14590 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14591 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14592 * fp/non-fp branches speeds it up by about 15%.
14593 * - use sext/zext opcodes instead of shifts
14595 * - get rid of TEMPLOADs if possible and use vregs instead
14596 * - clean up usage of OP_P/OP_ opcodes
14597 * - cleanup usage of DUMMY_USE
14598 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14600 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14601 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14602 * - make sure handle_stack_args () is called before the branch is emitted
14603 * - when the new IR is done, get rid of all unused stuff
14604 * - COMPARE/BEQ as separate instructions or unify them ?
14605 * - keeping them separate allows specialized compare instructions like
14606 * compare_imm, compare_membase
14607 * - most back ends unify fp compare+branch, fp compare+ceq
14608 * - integrate mono_save_args into inline_method
14609 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14610 * - handle long shift opts on 32 bit platforms somehow: they require
14611 * 3 sregs (2 for arg1 and 1 for arg2)
14612 * - make byref a 'normal' type.
14613 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14614 * variable if needed.
14615 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14616 * like inline_method.
14617 * - remove inlining restrictions
14618 * - fix LNEG and enable cfold of INEG
14619 * - generalize x86 optimizations like ldelema as a peephole optimization
14620 * - add store_mem_imm for amd64
14621 * - optimize the loading of the interruption flag in the managed->native wrappers
14622 * - avoid special handling of OP_NOP in passes
14623 * - move code inserting instructions into one function/macro.
14624 * - try a coalescing phase after liveness analysis
14625 * - add float -> vreg conversion + local optimizations on !x86
14626 * - figure out how to handle decomposed branches during optimizations, ie.
14627 * compare+branch, op_jump_table+op_br etc.
14628 * - promote RuntimeXHandles to vregs
14629 * - vtype cleanups:
14630 * - add a NEW_VARLOADA_VREG macro
14631 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14632 * accessing vtype fields.
14633 * - get rid of I8CONST on 64 bit platforms
14634 * - dealing with the increase in code size due to branches created during opcode
14636 * - use extended basic blocks
14637 * - all parts of the JIT
14638 * - handle_global_vregs () && local regalloc
14639 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14640 * - sources of increase in code size:
14643 * - isinst and castclass
14644 * - lvregs not allocated to global registers even if used multiple times
14645 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14647 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14648 * - add all micro optimizations from the old JIT
14649 * - put tree optimizations into the deadce pass
14650 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14651 * specific function.
14652 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14653 * fcompare + branchCC.
14654 * - create a helper function for allocating a stack slot, taking into account
14655 * MONO_CFG_HAS_SPILLUP.
14657 * - merge the ia64 switch changes.
14658 * - optimize mono_regstate2_alloc_int/float.
14659 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14660 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14661 * parts of the tree could be separated by other instructions, killing the tree
14662 * arguments, or stores killing loads etc. Also, should we fold loads into other
14663 * instructions if the result of the load is used multiple times ?
14664 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14665 * - LAST MERGE: 108395.
14666 * - when returning vtypes in registers, generate IR and append it to the end of the
14667 * last bb instead of doing it in the epilog.
14668 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14676 - When to decompose opcodes:
14677 - earlier: this makes some optimizations hard to implement, since the low level IR
14678 no longer contains the necessary information. But it is easier to do.
14679 - later: harder to implement, enables more optimizations.
14680 - Branches inside bblocks:
14681 - created when decomposing complex opcodes.
14682 - branches to another bblock: harmless, but not tracked by the branch
14683 optimizations, so need to branch to a label at the start of the bblock.
14684 - branches to inside the same bblock: very problematic, trips up the local
14685 reg allocator. Can be fixed by splitting the current bblock, but that is a
14686 complex operation, since some local vregs can become global vregs etc.
14687 - Local/global vregs:
14688 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14689 local register allocator.
14690 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14691 structure, created by mono_create_var (). Assigned to hregs or the stack by
14692 the global register allocator.
14693 - When to do optimizations like alu->alu_imm:
14694 - earlier -> saves work later on since the IR will be smaller/simpler
14695 - later -> can work on more instructions
14696 - Handling of valuetypes:
14697 - When a vtype is pushed on the stack, a new temporary is created, an
14698 instruction computing its address (LDADDR) is emitted and pushed on
14699 the stack. Need to optimize cases when the vtype is used immediately as in
14700 argument passing, stloc etc.
14701 - Instead of the to_end stuff in the old JIT, simply call the function handling
14702 the values on the stack before emitting the last instruction of the bb.
14705 #endif /* DISABLE_JIT */