2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <mono/utils/mono-compiler.h>
29 #ifdef HAVE_SYS_TIME_H
37 #include <mono/utils/memcheck.h>
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/debug-internals.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/utils/mono-error-internals.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
65 #include <mono/utils/mono-threads-coop.h>
71 #include "jit-icalls.h"
73 #include "debugger-agent.h"
74 #include "seq-points.h"
75 #include "aot-compiler.h"
76 #include "mini-llvm.h"
/* Heuristic costs/limits used by the inliner. */
78 #define BRANCH_COST 10
79 #define INLINE_LENGTH_LIMIT 20
81 /* These have 'cfg' as an implicit argument */
/* Record an inline failure (only when actually inlining) and abort IR generation. */
82 #define INLINE_FAILURE(msg) do { \
83 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
84 inline_failure (cfg, msg); \
85 goto exception_exit; \
/* Bail out if a pending exception type has already been recorded on the cfg. */
88 #define CHECK_CFG_EXCEPTION do {\
89 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
90 goto exception_exit; \
/* Report that METHOD cannot access FIELD, then abort ('cfg' implicit). */
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
/* Mark the method as un-shareable for generic sharing and abort. */
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
/* Same as above, but only when compiling in gsharedvt mode. */
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
/* Record an OOM condition on the cfg's MonoError and abort. */
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
110 mono_error_set_out_of_memory (&cfg->error, ""); \
111 goto exception_exit; \
/* Disable AOT compilation for this method, logging the call site when verbose. */
113 #define DISABLE_AOT(cfg) do { \
114 if ((cfg)->verbose_level >= 2) \
115 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
116 (cfg)->disable_aot = TRUE; \
/* Generic type-load failure: optionally break into the debugger, then abort. */
118 #define LOAD_ERROR do { \
119 break_on_unverified (); \
120 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
121 goto exception_exit; \
/* Type-load failure for a specific class; stashes the class for diagnostics. */
124 #define TYPE_LOAD_ERROR(klass) do { \
125 cfg->exception_ptr = klass; \
/* Propagate a failed cfg->error as a MONO_ERROR exception and jump to cleanup. */
129 #define CHECK_CFG_ERROR do {\
130 if (!mono_error_ok (&cfg->error)) { \
131 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
132 goto mono_error_exit; \
136 /* Determine whether 'ins' represents a load of the 'this' argument */
137 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode translation helpers defined later in this file. */
139 static int ldind_to_load_membase (int opcode);
140 static int stind_to_store_membase (int opcode);
/* Non-static opcode -> immediate-form mappings (used by other JIT files). */
142 int mono_op_to_op_imm (int opcode);
143 int mono_op_to_op_imm_noemul (int opcode);
145 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/* Forward declarations for the inliner and llvmonly call emission. */
147 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
148 guchar *ip, guint real_offset, gboolean inline_always);
150 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
152 inline static MonoInst*
153 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
155 /* helper methods signatures */
/* Icall signatures, created once by mono_create_helper_signatures () below. */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
159 static MonoMethodSignature *helper_sig_jit_thread_attach;
160 static MonoMethodSignature *helper_sig_get_tls_tramp;
161 static MonoMethodSignature *helper_sig_set_tls_tramp;
163 /* type loading helpers */
/* Generate cached lookup functions for corlib classes used during IR generation. */
164 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
165 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
168 * Instruction metadata
/* First expansion of mini-ops.h: per-opcode dest/src register-type characters. */
176 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
177 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
183 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
188 /* keep in sync with the enum in mini.h */
191 #include "mini-ops.h"
/* Second expansion of mini-ops.h: number of source registers per opcode. */
196 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
197 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
199 * This should contain the index of the last sreg + 1. This is not the same
200 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
202 const gint8 ins_sreg_counts[] = {
203 #include "mini-ops.h"
/* Thin public wrappers around the per-cfg virtual-register allocators. */
209 mono_alloc_ireg (MonoCompile *cfg)
211 return alloc_ireg (cfg);
215 mono_alloc_lreg (MonoCompile *cfg)
217 return alloc_lreg (cfg);
221 mono_alloc_freg (MonoCompile *cfg)
223 return alloc_freg (cfg);
227 mono_alloc_preg (MonoCompile *cfg)
229 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
233 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
235 return alloc_dreg (cfg, stack_type);
239 * mono_alloc_ireg_ref:
241 * Allocate an IREG, and mark it as holding a GC ref.
244 mono_alloc_ireg_ref (MonoCompile *cfg)
246 return alloc_ireg_ref (cfg);
250 * mono_alloc_ireg_mp:
252 * Allocate an IREG, and mark it as holding a managed pointer.
255 mono_alloc_ireg_mp (MonoCompile *cfg)
257 return alloc_ireg_mp (cfg);
261 * mono_alloc_ireg_copy:
263 * Allocate an IREG with the same GC type as VREG.
266 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
268 if (vreg_is_ref (cfg, vreg))
269 return alloc_ireg_ref (cfg);
270 else if (vreg_is_mp (cfg, vreg))
271 return alloc_ireg_mp (cfg);
273 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode (OP_MOVE/OP_FMOVE/OP_RMOVE/...) used to copy a value
 * of TYPE between registers. Recurses for enums, generic insts and type vars.
 * NOTE(review): several switch cases/returns are elided in this view — confirm
 * against the full file before editing.
 */
277 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
282 type = mini_get_underlying_type (type);
284 switch (type->type) {
297 case MONO_TYPE_FNPTR:
299 case MONO_TYPE_CLASS:
300 case MONO_TYPE_STRING:
301 case MONO_TYPE_OBJECT:
302 case MONO_TYPE_SZARRAY:
303 case MONO_TYPE_ARRAY:
307 #if SIZEOF_REGISTER == 8
/* R4 values live in float regs when r4fp is enabled, hence OP_RMOVE. */
313 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
316 case MONO_TYPE_VALUETYPE:
317 if (type->data.klass->enumtype) {
/* Enums move like their underlying integral type. */
318 type = mono_class_enum_basetype (type->data.klass);
321 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
324 case MONO_TYPE_TYPEDBYREF:
326 case MONO_TYPE_GENERICINST:
327 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Inflated types: recurse on the generic type definition. */
329 type = &type->data.generic_class->container_class->byval_arg;
333 g_assert (cfg->gshared);
334 if (mini_type_var_is_vt (type))
337 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
339 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the in/out edges of BB, then every instruction in it.
 */
345 mono_print_bb (MonoBasicBlock *bb, const char *msg)
349 GString *str = g_string_new ("");
351 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
352 for (i = 0; i < bb->in_count; ++i)
353 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
354 g_string_append_printf (str, ", OUT: ");
355 for (i = 0; i < bb->out_count; ++i)
356 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
357 g_string_append_printf (str, " ]\n");
359 g_print ("%s", str->str);
360 g_string_free (str, TRUE);
/* Dump the instruction list of the block. */
362 for (tree = bb->code; tree; tree = tree->next)
363 mono_print_ins_index (-1, tree);
/* One-time initialization of the icall signatures declared above. */
367 mono_create_helper_signatures (void)
369 helper_sig_domain_get = mono_create_icall_signature ("ptr");
370 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
371 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
372 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
373 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
374 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debugging hook: break into the debugger on unverifiable IL when requested. */
377 static MONO_NEVER_INLINE void
378 break_on_unverified (void)
380 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException on the cfg's error for an illegal field access. */
384 static MONO_NEVER_INLINE void
385 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
387 char *method_fname = mono_method_full_name (method, TRUE);
388 char *field_fname = mono_field_full_name (field);
389 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
390 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
/* The full-name strings are heap-allocated; free them after the error copies them. */
391 g_free (method_fname);
392 g_free (field_fname);
/* Mark the cfg as having failed inlining (out-of-line so callers stay small). */
395 static MONO_NEVER_INLINE void
396 inline_failure (MonoCompile *cfg, const char *msg)
398 if (cfg->verbose_level >= 2)
399 printf ("inline failed: %s\n", msg);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
403 static MONO_NEVER_INLINE void
404 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 if (cfg->verbose_level > 2) \
407 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt compilation failure with a detailed diagnostic message.
 * The message is heap-allocated and owned by the cfg (exception_message). */
411 static MONO_NEVER_INLINE void
412 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
414 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
415 if (cfg->verbose_level >= 2)
416 printf ("%s\n", cfg->exception_message);
417 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
421 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
422 * foo<T> (int i) { ldarg.0; box T; }
/* Unverifiable IL: for gsharedvt fall back to per-instantiation compilation,
 * otherwise break into the debugger (if enabled) and fail verification. */
424 #define UNVERIFIED do { \
425 if (cfg->gsharedvt) { \
426 if (cfg->verbose_level > 2) \
427 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
429 goto exception_exit; \
431 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IP. */
435 #define GET_BBLOCK(cfg,tblock,ip) do { \
436 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
438 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
439 NEW_BBLOCK (cfg, (tblock)); \
440 (tblock)->cil_code = (ip); \
441 ADD_BBLOCK (cfg, (tblock)); \
445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm; dest is a managed pointer. */
446 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
447 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
448 (dest)->dreg = alloc_ireg_mp ((cfg)); \
449 (dest)->sreg1 = (sr1); \
450 (dest)->sreg2 = (sr2); \
451 (dest)->inst_imm = (imm); \
452 (dest)->backend.shift_amount = (shift); \
453 MONO_ADD_INS ((cfg)->cbb, (dest)); \
457 /* Emit conversions so both operands of a binary opcode are of the same type */
459 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
461 MonoInst *arg1 = *arg1_ref;
462 MonoInst *arg2 = *arg2_ref;
/* Mixed r4/r8 operands: widen the r4 side to r8. */
465 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
466 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
469 /* Mixing r4/r8 is allowed by the spec */
470 if (arg1->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
474 conv->type = STACK_R8;
478 if (arg2->type == STACK_R4) {
479 int dreg = alloc_freg (cfg);
481 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
482 conv->type = STACK_R8;
488 #if SIZEOF_REGISTER == 8
489 /* FIXME: Need to add many more cases */
/* On 64-bit, sign-extend an I4 operand mixed with a native-int operand. */
490 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
493 int dr = alloc_preg (cfg);
494 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
495 (ins)->sreg2 = widen->dreg;
/* Pop two stack values, emit the typed binary op, push the (decomposed) result.
 * Expects 'cfg', 'ins' and the eval-stack pointer 'sp' in scope. */
500 #define ADD_BINOP(op) do { \
501 MONO_INST_NEW (cfg, ins, (op)); \
503 ins->sreg1 = sp [0]->dreg; \
504 ins->sreg2 = sp [1]->dreg; \
505 type_from_op (cfg, ins, sp [0], sp [1]); \
507 /* Have to insert a widening op */ \
508 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
509 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
510 MONO_ADD_INS ((cfg)->cbb, (ins)); \
511 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one stack value, emit the typed unary op, push the result. */
514 #define ADD_UNOP(op) do { \
515 MONO_INST_NEW (cfg, ins, (op)); \
517 ins->sreg1 = sp [0]->dreg; \
518 type_from_op (cfg, ins, sp [0], NULL); \
520 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
521 MONO_ADD_INS ((cfg)->cbb, (ins)); \
522 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch, wiring up true/false basic blocks.
 * Uses 'ip', 'target', 'tblock', 'stack_start' and 'start_new_bblock' from
 * the surrounding IL-decoding loop. */
525 #define ADD_BINCOND(next_block) do { \
528 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
529 cmp->sreg1 = sp [0]->dreg; \
530 cmp->sreg2 = sp [1]->dreg; \
531 type_from_op (cfg, cmp, sp [0], sp [1]); \
533 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
534 type_from_op (cfg, ins, sp [0], sp [1]); \
535 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
536 GET_BBLOCK (cfg, tblock, target); \
537 link_bblock (cfg, cfg->cbb, tblock); \
538 ins->inst_true_bb = tblock; \
539 if ((next_block)) { \
540 link_bblock (cfg, cfg->cbb, (next_block)); \
541 ins->inst_false_bb = (next_block); \
542 start_new_bblock = 1; \
544 GET_BBLOCK (cfg, tblock, ip); \
545 link_bblock (cfg, cfg->cbb, tblock); \
546 ins->inst_false_bb = tblock; \
547 start_new_bblock = 2; \
/* Flush any remaining eval-stack entries before ending the block. */ \
549 if (sp != stack_start) { \
550 handle_stack_args (cfg, stack_start, sp - stack_start); \
551 CHECK_UNVERIFIABLE (cfg); \
553 MONO_ADD_INS (cfg->cbb, cmp); \
554 MONO_ADD_INS (cfg->cbb, ins); \
558 * link_bblock: Links two basic blocks
560 * links two basic blocks in the control flow graph, the 'from'
561 * argument is the starting block and the 'to' argument is the block
562 * the control flow ends to after 'from'.
565 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
567 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added. */
571 if (from->cil_code) {
573 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
575 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
578 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
580 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
585 for (i = 0; i < from->out_count; ++i) {
586 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool arrays are copy-on-grow, never freed). */
592 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
593 for (i = 0; i < from->out_count; ++i) {
594 newa [i] = from->out_bb [i];
/* Same for the symmetric in-edge on 'to'. */
602 for (i = 0; i < to->in_count; ++i) {
603 if (from == to->in_bb [i]) {
609 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
610 for (i = 0; i < to->in_count; ++i) {
611 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
620 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
622 link_bblock (cfg, from, to);
626 * mono_find_block_region:
628 * We mark each basic block with a region ID. We use that to avoid BB
629 * optimizations when blocks are in different regions.
632 * A region token that encodes where this region is, and information
633 * about the clause owner for this block.
635 * The region encodes the try/catch/filter clause that owns this block
636 * as well as the type. -1 is a special value that represents a block
637 * that is in none of try/catch/filter.
640 mono_find_block_region (MonoCompile *cfg, int offset)
642 MonoMethodHeader *header = cfg->header;
643 MonoExceptionClause *clause;
/* First pass: handler (filter/finally/fault/catch) regions take priority. */
646 for (i = 0; i < header->num_clauses; ++i) {
647 clause = &header->clauses [i];
648 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
649 (offset < (clause->handler_offset)))
650 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
652 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
653 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
654 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
655 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
656 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
658 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) regions. */
661 for (i = 0; i < header->num_clauses; ++i) {
662 clause = &header->clauses [i];
664 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
665 return ((i + 1) << 8) | clause->flags;
/* Return whether OFFSET lies inside a finally or fault handler of the method. */
672 ip_in_finally_clause (MonoCompile *cfg, int offset)
674 MonoMethodHeader *header = cfg->header;
675 MonoExceptionClause *clause;
678 for (i = 0; i < header->num_clauses; ++i) {
679 clause = &header->clauses [i];
680 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
683 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE that a branch from IP to TARGET leaves,
 * i.e. the handlers that must run on the way out (returned as a GList). */
690 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
692 MonoMethodHeader *header = cfg->header;
693 MonoExceptionClause *clause;
697 for (i = 0; i < header->num_clauses; ++i) {
698 clause = &header->clauses [i];
/* The branch exits this clause: source inside, target outside. */
699 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
700 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
701 if (clause->flags == type)
702 res = g_list_append (res, clause);
/* Get or create the stack-pointer save variable for an exception REGION. */
709 mono_create_spvar_for_region (MonoCompile *cfg, int region)
713 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
717 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
718 /* prevent it from being register allocated */
719 var->flags |= MONO_INST_VOLATILE;
721 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at OFFSET, or NULL. */
725 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
727 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for a handler at OFFSET. */
731 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
735 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
739 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
740 /* prevent it from being register allocated */
741 var->flags |= MONO_INST_VOLATILE;
743 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
749 * Returns the type used in the eval stack when @type is loaded.
750 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass for a value of TYPE.
 * NOTE(review): several cases/early returns are elided in this view. */
753 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
757 type = mini_get_underlying_type (type);
758 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
760 inst->type = STACK_MP;
765 switch (type->type) {
767 inst->type = STACK_INV;
775 inst->type = STACK_I4;
780 case MONO_TYPE_FNPTR:
781 inst->type = STACK_PTR;
783 case MONO_TYPE_CLASS:
784 case MONO_TYPE_STRING:
785 case MONO_TYPE_OBJECT:
786 case MONO_TYPE_SZARRAY:
787 case MONO_TYPE_ARRAY:
788 inst->type = STACK_OBJ;
792 inst->type = STACK_I8;
795 inst->type = cfg->r4_stack_type;
798 inst->type = STACK_R8;
800 case MONO_TYPE_VALUETYPE:
801 if (type->data.klass->enumtype) {
/* Enums load as their underlying integral type. */
802 type = mono_class_enum_basetype (type->data.klass);
806 inst->type = STACK_VTYPE;
809 case MONO_TYPE_TYPEDBYREF:
810 inst->klass = mono_defaults.typed_reference_class;
811 inst->type = STACK_VTYPE;
813 case MONO_TYPE_GENERICINST:
814 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only reachable under generic sharing. */
818 g_assert (cfg->gshared);
819 if (mini_is_gsharedvt_type (type)) {
820 g_assert (cfg->gsharedvt);
821 inst->type = STACK_VTYPE;
823 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
827 g_error ("unknown type 0x%02x in eval stack type", type->type);
832 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [lhs][rhs]; STACK_INV marks
 * invalid IL. Rows with fewer than STACK_MAX entries rely on C's implicit
 * zero-initialization (0 == STACK_INV). */
835 bin_num_table [STACK_MAX] [STACK_MAX] = {
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
841 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand stack type. */
849 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
852 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/rem...), [lhs][rhs]. */
854 bin_int_table [STACK_MAX] [STACK_MAX] = {
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity matrix: 0 = invalid, non-zero = allowed (values >1
 * encode special verifier cases for pointer/reference comparisons). */
866 bin_comp_table [STACK_MAX] [STACK_MAX] = {
867 /* Inv i L p F & O vt r4 */
869 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
870 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
871 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
873 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
874 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
875 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
876 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
879 /* reduce the size of this table */
/* Result type of shift ops: [value][shift-amount]; shifts keep the lhs type. */
881 shift_table [STACK_MAX] [STACK_MAX] = {
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
889 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
893 * Tables to map from the non-specific opcode to the matching
894 * type-specific opcode.
896 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode DELTA added to the generic CEE_* opcode,
 * indexed by the operand's stack type. */
898 binops_op_map [STACK_MAX] = {
899 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
902 /* handles from CEE_NEG to CEE_CONV_U8 */
904 unops_op_map [STACK_MAX] = {
905 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
908 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
910 ovfops_op_map [STACK_MAX] = {
911 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
914 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
916 ovf2ops_op_map [STACK_MAX] = {
917 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
920 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
922 ovf3ops_op_map [STACK_MAX] = {
923 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
926 /* handles from CEE_BEQ to CEE_BLT_UN */
928 beqops_op_map [STACK_MAX] = {
929 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
932 /* handles from CEE_CEQ to CEE_CLT_UN */
934 ceqops_op_map [STACK_MAX] = {
935 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
939 * Sets ins->type (the type on the eval stack) according to the
940 * type of the opcode and the arguments to it.
941 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
943 * FIXME: this function sets ins->type unconditionally in some cases, but
944 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): this switch is very heavily elided in this view; many case
 * labels and break statements are missing — consult the full file. */
947 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
949 switch (ins->opcode) {
956 /* FIXME: check unverifiable args for STACK_MP */
/* Numeric binops: validate via bin_num_table, specialize via binops_op_map. */
957 ins->type = bin_num_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops. */
965 ins->type = bin_int_table [src1->type] [src2->type];
966 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
971 ins->type = shift_table [src1->type] [src2->type];
972 ins->opcode += binops_op_map [ins->type];
/* Comparisons: pick the compare opcode width from the lhs stack type. */
977 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
978 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
979 ins->opcode = OP_LCOMPARE;
980 else if (src1->type == STACK_R4)
981 ins->opcode = OP_RCOMPARE;
982 else if (src1->type == STACK_R8)
983 ins->opcode = OP_FCOMPARE;
985 ins->opcode = OP_ICOMPARE;
987 case OP_ICOMPARE_IMM:
988 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
989 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
990 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches. */
1002 ins->opcode += beqops_op_map [src1->type];
1005 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1006 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare-and-set variants only allow a subset (low bit of table). */
1012 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1013 ins->opcode += ceqops_op_map [src1->type];
1017 ins->type = neg_table [src1->type];
1018 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are valid. */
1021 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1022 ins->type = src1->type;
1024 ins->type = STACK_INV;
1025 ins->opcode += unops_op_map [ins->type];
1031 ins->type = STACK_I4;
1032 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
1035 ins->type = STACK_R8;
1036 switch (src1->type) {
1039 ins->opcode = OP_ICONV_TO_R_UN;
1042 ins->opcode = OP_LCONV_TO_R_UN;
1046 case CEE_CONV_OVF_I1:
1047 case CEE_CONV_OVF_U1:
1048 case CEE_CONV_OVF_I2:
1049 case CEE_CONV_OVF_U2:
1050 case CEE_CONV_OVF_I4:
1051 case CEE_CONV_OVF_U4:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf3ops_op_map [src1->type];
1055 case CEE_CONV_OVF_I_UN:
1056 case CEE_CONV_OVF_U_UN:
1057 ins->type = STACK_PTR;
1058 ins->opcode += ovf2ops_op_map [src1->type];
1060 case CEE_CONV_OVF_I1_UN:
1061 case CEE_CONV_OVF_I2_UN:
1062 case CEE_CONV_OVF_I4_UN:
1063 case CEE_CONV_OVF_U1_UN:
1064 case CEE_CONV_OVF_U2_UN:
1065 case CEE_CONV_OVF_U4_UN:
1066 ins->type = STACK_I4;
1067 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: convert to native unsigned int. */
1070 ins->type = STACK_PTR;
1071 switch (src1->type) {
1073 ins->opcode = OP_ICONV_TO_U;
1077 #if SIZEOF_VOID_P == 8
1078 ins->opcode = OP_LCONV_TO_U;
1080 ins->opcode = OP_MOVE;
1084 ins->opcode = OP_LCONV_TO_U;
1087 ins->opcode = OP_FCONV_TO_U;
1093 ins->type = STACK_I8;
1094 ins->opcode += unops_op_map [src1->type];
1096 case CEE_CONV_OVF_I8:
1097 case CEE_CONV_OVF_U8:
1098 ins->type = STACK_I8;
1099 ins->opcode += ovf3ops_op_map [src1->type];
1101 case CEE_CONV_OVF_U8_UN:
1102 case CEE_CONV_OVF_I8_UN:
1103 ins->type = STACK_I8;
1104 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4: result stack type depends on cfg->r4_stack_type. */
1107 ins->type = cfg->r4_stack_type;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1112 ins->opcode += unops_op_map [src1->type];
1115 ins->type = STACK_R8;
1119 ins->type = STACK_I4;
1120 ins->opcode += ovfops_op_map [src1->type];
1123 case CEE_CONV_OVF_I:
1124 case CEE_CONV_OVF_U:
1125 ins->type = STACK_PTR;
1126 ins->opcode += ovfops_op_map [src1->type];
1129 case CEE_ADD_OVF_UN:
1131 case CEE_MUL_OVF_UN:
1133 case CEE_SUB_OVF_UN:
1134 ins->type = bin_num_table [src1->type] [src2->type];
1135 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floating point. */
1136 if (ins->type == STACK_R8)
1137 ins->type = STACK_INV;
1139 case OP_LOAD_MEMBASE:
1140 ins->type = STACK_PTR;
1142 case OP_LOADI1_MEMBASE:
1143 case OP_LOADU1_MEMBASE:
1144 case OP_LOADI2_MEMBASE:
1145 case OP_LOADU2_MEMBASE:
1146 case OP_LOADI4_MEMBASE:
1147 case OP_LOADU4_MEMBASE:
1148 ins->type = STACK_PTR;
1150 case OP_LOADI8_MEMBASE:
1151 ins->type = STACK_I8;
1153 case OP_LOADR4_MEMBASE:
1154 ins->type = cfg->r4_stack_type;
1156 case OP_LOADR8_MEMBASE:
1157 ins->type = STACK_R8;
1160 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack default to object as their klass. */
1164 if (ins->type == STACK_MP)
1165 ins->klass = mono_defaults.object_class;
/* Map of ldind/stind access kinds to eval-stack types (truncated in this view). */
1170 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1176 param_table [STACK_MAX] [STACK_MAX] = {
/* Verify that ARGS (including an optional 'this') are compatible with SIG.
 * Returns 0 on mismatch (callers treat non-zero as OK). */
1181 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1186 switch (args->type) {
1196 for (i = 0; i < sig->param_count; ++i) {
1197 switch (args [i].type) {
1201 if (!sig->params [i]->byref)
1205 if (sig->params [i]->byref)
1207 switch (sig->params [i]->type) {
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
/* Floating-point stack values must match an R4/R8 parameter. */
1219 if (sig->params [i]->byref)
1221 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1230 /*if (!param_table [args [i].type] [sig->params [i]->type])
1238 * When we need a pointer to the current domain many times in a method, we
1239 * call mono_domain_get() once and we store the result in a local variable.
1240 * This function returns the variable that represents the MonoDomain*.
1242 inline static MonoInst *
1243 mono_get_domainvar (MonoCompile *cfg)
1245 if (!cfg->domainvar)
1246 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 return cfg->domainvar;
1251 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; NULL when not AOT or the backend has no GOT. */
1255 mono_get_got_var (MonoCompile *cfg)
1257 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1259 if (!cfg->got_var) {
1260 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1262 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable; gshared only. */
1266 mono_get_vtable_var (MonoCompile *cfg)
1268 g_assert (cfg->gshared);
1270 if (!cfg->rgctx_var) {
1271 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1272 /* force the var to be stack allocated */
1273 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1276 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType*.
 * STACK_MP (between the R8 and OBJ cases, line elided in this fragment)
 * presumably maps to &ins->klass->this_arg; STACK_VTYPE uses the klass's
 * byval_arg. Unknown stack types abort via g_error.
 */
1280 type_from_stack_type (MonoInst *ins) {
1281 switch (ins->type) {
1282 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1283 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1284 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1285 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1286 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1288 return &ins->klass->this_arg;
1289 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1290 case STACK_VTYPE: return &ins->klass->byval_arg;
1292 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type: classify a MonoType* into a STACK_*
 * evaluation-stack type (R4 uses cfg->r4_stack_type, which depends on the
 * backend's float handling). Many case labels/returns are elided in this
 * sampled fragment.
 */
1297 static G_GNUC_UNUSED int
1298 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1300 t = mono_type_get_underlying_type (t);
1312 case MONO_TYPE_FNPTR:
1314 case MONO_TYPE_CLASS:
1315 case MONO_TYPE_STRING:
1316 case MONO_TYPE_OBJECT:
1317 case MONO_TYPE_SZARRAY:
1318 case MONO_TYPE_ARRAY:
1324 return cfg->r4_stack_type;
1327 case MONO_TYPE_VALUETYPE:
1328 case MONO_TYPE_TYPEDBYREF:
1330 case MONO_TYPE_GENERICINST:
1331 if (mono_type_generic_inst_is_valuetype (t))
1337 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element MonoClass* implied by a CEE_LDELEM_*/CEE_STELEM_*
 * opcode (the case labels for most entries are elided in this fragment;
 * only the LDELEM_REF/STELEM_REF labels are visible).
 */
1344 array_access_to_klass (int opcode)
1348 return mono_defaults.byte_class;
1350 return mono_defaults.uint16_class;
1353 return mono_defaults.int_class;
1356 return mono_defaults.sbyte_class;
1359 return mono_defaults.int16_class;
1362 return mono_defaults.int32_class;
1364 return mono_defaults.uint32_class;
1367 return mono_defaults.int64_class;
1370 return mono_defaults.single_class;
1373 return mono_defaults.double_class;
1374 case CEE_LDELEM_REF:
1375 case CEE_STELEM_REF:
1376 return mono_defaults.object_class;
1378 g_assert_not_reached ();
1384 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable for stack SLOT holding a value of INS's stack
 * type, reusing a previously-created var for the same (slot, type) pair
 * via the cfg->intvars cache when possible. Slots beyond the method's
 * declared max_stack (from inlining) always get a fresh variable.
 */
1387 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1392 /* inlining can result in deeper stacks */
1393 if (slot >= cfg->header->max_stack)
1394 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL)
1396 pos = ins->type - 1 + slot * STACK_MAX;
1398 switch (ins->type) {
1405 if ((vnum = cfg->intvars [pos]))
1406 return cfg->varinfo [vnum];
1407 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1408 cfg->intvars [pos] = res->inst_c0;
1411 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * During AOT compilation, record the (image, token) pair for KEY in
 * cfg->token_info_hash so the AOT compiler can later re-resolve it.
 */
1417 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1420 * Don't use this if a generic_context is set, since that means AOT can't
1421 * look up the method using just the image+token.
1422 * table == 0 means this is a reference made from a wrapper.
1424 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1425 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1426 jump_info_token->image = image;
1427 jump_info_token->token = token;
1428 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1433 * This function is called to handle items that are left on the evaluation stack
1434 * at basic block boundaries. What happens is that we save the values to local variables
1435 * and we reload them later when first entering the target basic block (with the
1436 * handle_loaded_temps () function).
1437 * A single joint point will use the same variables (stored in the array bb->out_stack or
1438 * bb->in_stack, if the basic block is before or after the joint point).
1440 * This function needs to be called _before_ emitting the last instruction of
1441 * the bb (i.e. before emitting a branch).
1442 * If the stack merge fails at a join point, cfg->unverifiable is set.
1445 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1448 MonoBasicBlock *bb = cfg->cbb;
1449 MonoBasicBlock *outb;
1450 MonoInst *inst, **locals;
1455 if (cfg->verbose_level > 3)
1456 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick (or create) the out_stack variables for this bblock. */
1457 if (!bb->out_scount) {
1458 bb->out_scount = count;
1459 //printf ("bblock %d has out:", bb->block_num);
/* Prefer an in_stack already chosen by a successor, so both sides of the
 * join use the same locals. */
1461 for (i = 0; i < bb->out_count; ++i) {
1462 outb = bb->out_bb [i];
1463 /* exception handlers are linked, but they should not be considered for stack args */
1464 if (outb->flags & BB_EXCEPTION_HANDLER)
1466 //printf (" %d", outb->block_num);
1467 if (outb->in_stack) {
1469 bb->out_stack = outb->in_stack;
1475 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1476 for (i = 0; i < count; ++i) {
1478 * try to reuse temps already allocated for this purpouse, if they occupy the same
1479 * stack slot and if they are of the same type.
1480 * This won't cause conflicts since if 'local' is used to
1481 * store one of the values in the in_stack of a bblock, then
1482 * the same variable will be used for the same outgoing stack
1484 * This doesn't work when inlining methods, since the bblocks
1485 * in the inlined methods do not inherit their in_stack from
1486 * the bblock they are inlined to. See bug #58863 for an
1489 if (cfg->inlined_method)
1490 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1492 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate out_stack to successors' in_stack; a depth mismatch
 * at a join point makes the method unverifiable. */
1497 for (i = 0; i < bb->out_count; ++i) {
1498 outb = bb->out_bb [i];
1499 /* exception handlers are linked, but they should not be considered for stack args */
1500 if (outb->flags & BB_EXCEPTION_HANDLER)
1502 if (outb->in_scount) {
1503 if (outb->in_scount != bb->out_scount) {
1504 cfg->unverifiable = TRUE;
1507 continue; /* check they are the same locals */
1509 outb->in_scount = count;
1510 outb->in_stack = bb->out_stack;
/* Phase 3: emit the stores that spill the stack values into the locals,
 * and replace the stack entries with the locals. */
1513 locals = bb->out_stack;
1515 for (i = 0; i < count; ++i) {
1516 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1517 inst->cil_code = sp [i]->cil_code;
1518 sp [i] = locals [i];
1519 if (cfg->verbose_level > 3)
1520 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1524 * It is possible that the out bblocks already have in_stack assigned, and
1525 * the in_stacks differ. In this case, we will store to all the different
1532 /* Find a bblock which has a different in_stack */
1534 while (bindex < bb->out_count) {
1535 outb = bb->out_bb [bindex];
1536 /* exception handlers are linked, but they should not be considered for stack args */
1537 if (outb->flags & BB_EXCEPTION_HANDLER) {
1541 if (outb->in_stack != locals) {
1542 for (i = 0; i < count; ++i) {
1543 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1544 inst->cil_code = sp [i]->cil_code;
1545 sp [i] = locals [i];
1546 if (cfg->verbose_level > 3)
1547 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1549 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 * Emit an instruction that loads a runtime constant identified by
 * (PATCH_TYPE, DATA). Under AOT it becomes a patchable AOTCONST; in JIT
 * mode the patch is resolved immediately and emitted as a PCONST.
 */
1559 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1563 if (cfg->compile_aot) {
1564 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1570 ji.type = patch_type;
1571 ji.data.target = data;
1572 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1573 mono_error_assert_ok (&error);
1575 EMIT_NEW_PCONST (cfg, ins, target);
/* Public (non-static) wrapper around emit_runtime_constant (). */
1581 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1583 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 * Emit IR that sets SIZE bytes at DESTREG+OFFSET to VAL (only VAL == 0 is
 * supported, per the assert). Small aligned sizes use a single immediate
 * store; otherwise a zero register is stored in the widest aligned chunks
 * available (8/4/2/1 bytes).
 */
1587 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1591 g_assert (val == 0);
1596 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1599 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1602 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1605 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1607 #if SIZEOF_REGISTER == 8
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Materialize VAL (zero) in a register for the chunked store loop below. */
1615 val_reg = alloc_preg (cfg);
1617 if (SIZEOF_REGISTER == 8)
1618 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1620 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1623 /* This could be optimized further if neccesary */
1625 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Wide (8-byte) stores are only used when unaligned access is allowed. */
1632 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1634 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1646 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit an unrolled load/store sequence copying SIZE bytes from
 * SRCREG+SOFFSET to DESTREG+DOFFSET, using the widest chunks the
 * alignment (and backend unaligned-access support) permit.
 * Size is capped (assert) to bound code expansion.
 */
1663 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1670 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1671 g_assert (size < 10000);
1674 /* This could be optimized further if neccesary */
1676 cur_reg = alloc_preg (cfg);
1677 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks only when the backend tolerates unaligned access. */
1685 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1687 cur_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1697 cur_reg = alloc_preg (cfg);
1698 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1705 cur_reg = alloc_preg (cfg);
1706 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1713 cur_reg = alloc_preg (cfg);
1714 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1715 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mono_create_fast_tls_getter:
 * Build an OP_TLS_GET instruction reading the TLS slot for KEY directly
 * via its offset. Returns NULL (fragment: early-return body elided) when
 * compiling AOT or when fast TLS is unavailable, so callers fall back.
 */
1723 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1725 int tls_offset = mono_tls_get_tls_offset (key);
1727 if (cfg->compile_aot)
1730 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1732 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1733 ins->dreg = mono_alloc_preg (cfg);
1734 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Build an OP_TLS_SET instruction writing VALUE into the TLS slot for KEY.
 * Same AOT / fast-TLS availability restrictions as the getter above.
 */
1741 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1743 int tls_offset = mono_tls_get_tls_offset (key);
1745 if (cfg->compile_aot)
1748 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1750 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1751 ins->sreg1 = value->dreg;
1752 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR that reads the TLS value for KEY. Tries the fast inline path
 * first; otherwise falls back to a trampoline call (AOT, via a special
 * non-PLT constant to avoid recursion) or a JIT icall to the TLS getter.
 */
1760 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1762 MonoInst *fast_tls = NULL;
1764 if (!mini_get_debug_options ()->use_fallback_tls)
1765 fast_tls = mono_create_fast_tls_getter (cfg, key);
1768 MONO_ADD_INS (cfg->cbb, fast_tls);
1772 if (cfg->compile_aot) {
1775 * tls getters are critical pieces of code and we don't want to resolve them
1776 * through the standard plt/tramp mechanism since we might expose ourselves
1777 * to crashes and infinite recursions.
1779 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1780 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1782 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1783 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR that writes VALUE to the TLS slot for KEY; mirrors
 * mono_create_tls_get (fast path, AOT trampoline, or icall fallback).
 */
1788 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1790 MonoInst *fast_tls = NULL;
1792 if (!mini_get_debug_options ()->use_fallback_tls)
1793 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1796 MONO_ADD_INS (cfg->cbb, fast_tls);
1800 if (cfg->compile_aot) {
1802 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1803 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1805 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1806 return mono_emit_jit_icall (cfg, setter, &value);
1813 * Emit IR to push the current LMF onto the LMF stack.
1816 emit_push_lmf (MonoCompile *cfg)
1819 * Emit IR to push the LMF:
1820 * lmf_addr = <lmf_addr from tls>
1821 * lmf->lmf_addr = lmf_addr
1822 * lmf->prev_lmf = *lmf_addr
1825 MonoInst *ins, *lmf_ins;
/* Fast path: the TLS slot holds the MonoLMF* directly. */
1830 if (cfg->lmf_ir_mono_lmf) {
1831 MonoInst *lmf_vara_ins, *lmf_ins;
1832 /* Load current lmf */
1833 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF);
1835 EMIT_NEW_VARLOADA (cfg, lmf_vara_ins, cfg->lmf_var, NULL);
1836 /* Save previous_lmf */
1837 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_vara_ins->dreg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set the new LMF as the current one in TLS. */
1839 mono_create_tls_set (cfg, lmf_vara_ins, TLS_KEY_LMF);
/* Slow path: go through the lmf_addr indirection. */
1841 int lmf_reg, prev_lmf_reg;
1843 * Store lmf_addr in a variable, so it can be allocated to a global register.
1845 if (!cfg->lmf_addr_var)
1846 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1849 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1851 int jit_tls_dreg = ins->dreg;
/* lmf_addr = &jit_tls->lmf */
1853 lmf_reg = alloc_preg (cfg);
1854 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1856 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1859 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1861 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1862 lmf_reg = ins->dreg;
1864 prev_lmf_reg = alloc_preg (cfg);
1865 /* Save previous_lmf */
1866 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1867 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf — link the new LMF in. */
1869 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1876 * Emit IR to pop the current LMF from the LMF stack.
1879 emit_pop_lmf (MonoCompile *cfg)
1881 int lmf_reg, lmf_addr_reg;
1887 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1888 lmf_reg = ins->dreg;
/* Fast path: restore previous_lmf straight into the TLS LMF slot. */
1890 if (cfg->lmf_ir_mono_lmf) {
1891 /* Load previous_lmf */
1892 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, alloc_preg (cfg), lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1894 mono_create_tls_set (cfg, ins, TLS_KEY_LMF);
1898 * Emit IR to pop the LMF:
1899 * *(lmf->lmf_addr) = lmf->prev_lmf
1901 /* This could be called before emit_push_lmf () */
1902 if (!cfg->lmf_addr_var)
1903 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1904 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1906 prev_lmf_reg = alloc_preg (cfg);
1907 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1908 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall (FUNC) with the current method as the
 * single argument, but only for the outermost method and only when the
 * ENTER_LEAVE profiler option is active.
 */
1913 emit_instrumentation_call (MonoCompile *cfg, void *func)
1915 MonoInst *iargs [1];
1918 * Avoid instrumenting inlined methods since it can
1919 * distort profiling results.
1921 if (cfg->method != cfg->current_method)
1924 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1925 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1926 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call opcode variant based on the (lowered) return type and
 * the call style: CALLI (indirect via register), virtual (membase), or
 * direct. Families: VOIDCALL / CALL / LCALL (i8) / RCALL (r4) /
 * FCALL (r8) / VCALL (valuetypes). Enums and generic insts are lowered
 * and re-dispatched (fragment: the goto/retry lines are elided).
 */
1931 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1934 type = mini_get_underlying_type (type);
1935 switch (type->type) {
1936 case MONO_TYPE_VOID:
1937 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1944 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1948 case MONO_TYPE_FNPTR:
1949 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1950 case MONO_TYPE_CLASS:
1951 case MONO_TYPE_STRING:
1952 case MONO_TYPE_OBJECT:
1953 case MONO_TYPE_SZARRAY:
1954 case MONO_TYPE_ARRAY:
1955 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1958 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1961 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1963 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1965 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1966 case MONO_TYPE_VALUETYPE:
1967 if (type->data.klass->enumtype) {
1968 type = mono_class_enum_basetype (type->data.klass);
1971 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1972 case MONO_TYPE_TYPEDBYREF:
1973 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1974 case MONO_TYPE_GENERICINST:
1975 type = &type->data.generic_class->container_class->byval_arg;
1978 case MONO_TYPE_MVAR:
1980 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1982 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1987 //XXX this ignores if t is byref
1988 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1991 * target_type_is_incompatible:
1992 * @cfg: MonoCompile context
1994 * Check that the item @arg on the evaluation stack can be stored
1995 * in the target type (can be a local, or field, etc).
1996 * The cfg arg can be used to check if we need verification or just
1999 * Returns: non-0 value if arg can't be stored on a target.
2002 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2004 MonoType *simple_type;
/* Byref targets: accept a managed pointer to a compatible type, or a
 * native-int pointer. */
2007 if (target->byref) {
2008 /* FIXME: check that the pointed to types match */
2009 if (arg->type == STACK_MP) {
2010 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2011 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2012 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2014 /* if the target is native int& or same type */
2015 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2018 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2019 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2020 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2024 if (arg->type == STACK_PTR)
/* Non-byref: lower the target and check per underlying type. */
2029 simple_type = mini_get_underlying_type (target);
2030 switch (simple_type->type) {
2031 case MONO_TYPE_VOID:
2039 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2043 /* STACK_MP is needed when setting pinned locals */
2044 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2049 case MONO_TYPE_FNPTR:
2051 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2052 * in native int. (#688008).
2054 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2057 case MONO_TYPE_CLASS:
2058 case MONO_TYPE_STRING:
2059 case MONO_TYPE_OBJECT:
2060 case MONO_TYPE_SZARRAY:
2061 case MONO_TYPE_ARRAY:
2062 if (arg->type != STACK_OBJ)
2064 /* FIXME: check type compatibility */
2068 if (arg->type != STACK_I8)
2072 if (arg->type != cfg->r4_stack_type)
2076 if (arg->type != STACK_R8)
2079 case MONO_TYPE_VALUETYPE:
2080 if (arg->type != STACK_VTYPE)
2082 klass = mono_class_from_mono_type (simple_type);
2083 if (klass != arg->klass)
2086 case MONO_TYPE_TYPEDBYREF:
2087 if (arg->type != STACK_VTYPE)
2089 klass = mono_class_from_mono_type (simple_type);
2090 if (klass != arg->klass)
2093 case MONO_TYPE_GENERICINST:
2094 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2095 MonoClass *target_class;
2096 if (arg->type != STACK_VTYPE)
2098 klass = mono_class_from_mono_type (simple_type);
2099 target_class = mono_class_from_mono_type (target);
2100 /* The second cases is needed when doing partial sharing */
2101 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2105 if (arg->type != STACK_OBJ)
2107 /* FIXME: check type compatibility */
/* Generic type variables only appear here under gshared. */
2111 case MONO_TYPE_MVAR:
2112 g_assert (cfg->gshared);
2113 if (mini_type_var_is_vt (simple_type)) {
2114 if (arg->type != STACK_VTYPE)
2117 if (arg->type != STACK_OBJ)
2122 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2128 * Prepare arguments for passing to a function call.
2129 * Return a non-zero value if the arguments can't be passed to the given
2131 * The type checks are not yet complete and some conversions may need
2132 * casts on 32 or 64 bit architectures.
2134 * FIXME: implement this using target_type_is_incompatible ()
2137 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2139 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer, or native pointer. */
2143 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2147 for (i = 0; i < sig->param_count; ++i) {
2148 if (sig->params [i]->byref) {
2149 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2153 simple_type = mini_get_underlying_type (sig->params [i]);
/* handle_enum: retry label for enum/genericinst lowering (elided lines). */
2155 switch (simple_type->type) {
2156 case MONO_TYPE_VOID:
2165 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2171 case MONO_TYPE_FNPTR:
2172 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2175 case MONO_TYPE_CLASS:
2176 case MONO_TYPE_STRING:
2177 case MONO_TYPE_OBJECT:
2178 case MONO_TYPE_SZARRAY:
2179 case MONO_TYPE_ARRAY:
2180 if (args [i]->type != STACK_OBJ)
2185 if (args [i]->type != STACK_I8)
2189 if (args [i]->type != cfg->r4_stack_type)
2193 if (args [i]->type != STACK_R8)
2196 case MONO_TYPE_VALUETYPE:
2197 if (simple_type->data.klass->enumtype) {
2198 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2201 if (args [i]->type != STACK_VTYPE)
2204 case MONO_TYPE_TYPEDBYREF:
2205 if (args [i]->type != STACK_VTYPE)
2208 case MONO_TYPE_GENERICINST:
2209 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2212 case MONO_TYPE_MVAR:
2214 if (args [i]->type != STACK_VTYPE)
2218 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart
 * (return lines for each case are elided in this fragment).
 */
2226 callvirt_to_call (int opcode)
2229 case OP_CALL_MEMBASE:
2231 case OP_VOIDCALL_MEMBASE:
2233 case OP_FCALL_MEMBASE:
2235 case OP_RCALL_MEMBASE:
2237 case OP_VCALL_MEMBASE:
2239 case OP_LCALL_MEMBASE:
2242 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Map a *_MEMBASE (virtual) call opcode to its indirect *_REG counterpart.
 */
2249 callvirt_to_call_reg (int opcode)
2252 case OP_CALL_MEMBASE:
2254 case OP_VOIDCALL_MEMBASE:
2255 return OP_VOIDCALL_REG;
2256 case OP_FCALL_MEMBASE:
2257 return OP_FCALL_REG;
2258 case OP_RCALL_MEMBASE:
2259 return OP_RCALL_REG;
2260 case OP_VCALL_MEMBASE:
2261 return OP_VCALL_REG;
2262 case OP_LCALL_MEMBASE:
2263 return OP_LCALL_REG;
2265 g_assert_not_reached ();
2271 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Load the IMT argument (either the given IMT_ARG register value or a
 * METHOD constant) into a fresh register and attach it to CALL as the
 * out-arg for MONO_ARCH_IMT_REG. The LLVM path additionally records the
 * register in call->imt_arg_reg.
 */
2273 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2277 if (COMPILE_LLVM (cfg)) {
2279 method_reg = alloc_preg (cfg);
2280 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2282 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2283 method_reg = ins->dreg;
2287 call->imt_arg_reg = method_reg;
2289 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2294 method_reg = alloc_preg (cfg);
2295 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2297 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2298 method_reg = ins->dreg;
2301 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 * targeting TARGET (ip/type assignments elided in this fragment). */
2304 static MonoJumpInfo *
2305 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2307 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2311 ji->data.target = target;
/* cfg-aware wrapper: how much generic context KLASS uses (0 when
 * irrelevant to this compile — guard lines elided in this fragment). */
2317 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2320 return mono_class_check_context_used (klass);
/* cfg-aware wrapper: how much generic context METHOD uses (see the class
 * variant above for the same pattern). */
2326 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2329 return mono_method_check_context_used (method);
2335 * check_method_sharing:
2337 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Out-params: *OUT_PASS_VTABLE — pass the class vtable (static/valuetype
 * methods of generic classes without a method inst); *OUT_PASS_MRGCTX —
 * pass a method RGCTX (methods with their own generic method inst). The
 * two are mutually exclusive (asserted below). */
2340 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2342 gboolean pass_vtable = FALSE;
2343 gboolean pass_mrgctx = FALSE;
2345 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2346 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2347 gboolean sharable = FALSE;
2349 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2353 * Pass vtable iff target method might
2354 * be shared, which means that sharing
2355 * is enabled for its class and its
2356 * context is sharable (and it's not a
2359 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2363 if (mini_method_get_context (cmethod) &&
2364 mini_method_get_context (cmethod)->method_inst) {
2365 g_assert (!pass_vtable);
2367 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2370 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2375 if (out_pass_vtable)
2376 *out_pass_vtable = pass_vtable;
2377 if (out_pass_mrgctx)
2378 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build (but do not add) a MonoCallInst for SIG: picks the opcode
 * (tailcall / calli / virtual / direct), sets up the vtype return
 * (vret_var or OP_OUTARG_VTRETADDR), applies the soft-float r8->r4
 * argument conversion when needed, and lets the backend (or LLVM)
 * lower the out-args. Returns the call instruction.
 */
2381 inline static MonoCallInst *
2382 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2383 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2387 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler 'leave' hook first. */
2395 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2397 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2399 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2402 call->signature = sig;
2403 call->rgctx_reg = rgctx;
2404 sig_ret = mini_get_underlying_type (sig->ret);
2406 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return handling; the first branch presumably covers the
 * pre-existing vret_addr case — the guard line is elided here. */
2409 if (mini_type_is_vtype (sig_ret)) {
2410 call->vret_var = cfg->vret_addr;
2411 //g_assert_not_reached ();
2413 } else if (mini_type_is_vtype (sig_ret)) {
2414 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2417 temp->backend.is_pinvoke = sig->pinvoke;
2420 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2421 * address of return value to increase optimization opportunities.
2422 * Before vtype decomposition, the dreg of the call ins itself represents the
2423 * fact the call modifies the return value. After decomposition, the call will
2424 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2425 * will be transformed into an LDADDR.
2427 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2428 loada->dreg = alloc_preg (cfg);
2429 loada->inst_p0 = temp;
2430 /* We reference the call too since call->dreg could change during optimization */
2431 loada->inst_p1 = call;
2432 MONO_ADD_INS (cfg->cbb, loada);
2434 call->inst.dreg = temp->dreg;
2436 call->vret_var = loada;
2437 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2438 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2440 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2441 if (COMPILE_SOFT_FLOAT (cfg)) {
2443 * If the call has a float argument, we would need to do an r8->r4 conversion using
2444 * an icall, but that cannot be done during the call sequence since it would clobber
2445 * the call registers + the stack. So we do it before emitting the call.
2447 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2449 MonoInst *in = call->args [i];
2451 if (i >= sig->hasthis)
2452 t = sig->params [i - sig->hasthis];
2454 t = &mono_defaults.int_class->byval_arg;
2455 t = mono_type_get_underlying_type (t);
2457 if (!t->byref && t->type == MONO_TYPE_R4) {
2458 MonoInst *iargs [1];
2462 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2464 /* The result will be in an int vreg */
2465 call->args [i] = conv;
2471 call->need_unbox_trampoline = unbox_trampoline;
2474 if (COMPILE_LLVM (cfg))
2475 mono_llvm_emit_call (cfg, call);
2477 mono_arch_emit_call (cfg, call);
2479 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-argument area and mark the method as
 * containing calls. */
2482 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2483 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx value in RGCTX_REG to CALL as the out-arg for
 * MONO_ARCH_RGCTX_REG and mark the cfg as using the rgctx register. */
2489 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2491 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2492 cfg->uses_rgctx_reg = TRUE;
2493 call->rgctx_reg = TRUE;
2495 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, optionally
 * passing an IMT argument and/or an rgctx argument. For pinvoke wrappers
 * with check_pinvoke_callconv, brackets the call with OP_GET_SP/OP_SET_SP
 * to detect callee stack imbalance (wrong calling convention) and throws
 * ExecutionEngineException when the stack pointer changed.
 */
2499 inline static MonoInst*
2500 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2505 gboolean check_sp = FALSE;
2507 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2508 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2510 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a dedicated register before the out-args are
 * set up. */
2515 rgctx_reg = mono_alloc_preg (cfg);
2516 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the stack pointer before the call. */
2520 if (!cfg->stack_inbalance_var)
2521 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2523 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2524 ins->dreg = cfg->stack_inbalance_var->dreg;
2525 MONO_ADD_INS (cfg->cbb, ins);
2528 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2530 call->inst.sreg1 = addr->dreg;
2533 emit_imt_argument (cfg, call, NULL, imt_arg);
2535 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare the stack pointer after the call against the saved one. */
2540 sp_reg = mono_alloc_preg (cfg);
2542 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2544 MONO_ADD_INS (cfg->cbb, ins);
2546 /* Restore the stack so we don't crash when throwing the exception */
2547 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2548 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2549 MONO_ADD_INS (cfg->cbb, ins);
2551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2556 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2558 return (MonoInst*)call;
2562 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2565 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit IR for a (possibly virtual/tail/remoted) managed call to METHOD with
 * signature SIG and arguments ARGS. THIS_INS, when non-NULL, is the receiver
 * and also marks the call as virtual; IMT_ARG/RGCTX_ARG are optional hidden
 * arguments for interface dispatch and generic sharing. Returns the call
 * instruction as a MonoInst*.
 */
2568 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2569 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2571 #ifndef DISABLE_REMOTING
2572 gboolean might_be_remote = FALSE;
2574 gboolean virtual_ = this_ins != NULL;
2575 gboolean enable_for_aot = TRUE;
2578 MonoInst *call_target = NULL;
2580 gboolean need_unbox_trampoline;
2583 sig = mono_method_signature (method);
/* llvm-only mode dispatches interface calls elsewhere; reaching here is a bug. */
2585 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2586 g_assert_not_reached ();
/* Copy the rgctx argument into a fresh vreg so it survives until set_rgctx_arg. */
2589 rgctx_reg = mono_alloc_preg (cfg);
2590 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2593 if (method->string_ctor) {
2594 /* Create the real signature */
2595 /* FIXME: Cache these */
2596 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
/* String ctors actually return the string, not void. */
2597 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2602 context_used = mini_method_check_context_used (cfg, method);
2604 #ifndef DISABLE_REMOTING
/* A call may hit a transparent proxy when the receiver is MarshalByRef
 * (or System.Object) and the call is non-virtual with an unproven 'this'. */
2605 might_be_remote = this_ins && sig->hasthis &&
2606 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2607 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2609 if (might_be_remote && context_used) {
2612 g_assert (cfg->gshared);
/* Under generic sharing, fetch the remoting-check wrapper address from the rgctx
 * and call it indirectly. */
2614 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2616 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* llvm-only virtual calls take a dedicated path. */
2620 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2621 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* The receiver might be a boxed valuetype when the declaring class is
 * Object or an interface. */
2623 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2625 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2627 #ifndef DISABLE_REMOTING
2628 if (might_be_remote)
2629 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2632 call->method = method;
2633 call->inst.flags |= MONO_INST_HAS_METHOD;
2634 call->inst.inst_left = this_ins;
2635 call->tail_call = tail;
2638 int vtable_reg, slot_reg, this_reg;
2641 this_reg = this_ins->dreg;
/* Fast path for delegate Invoke: call through delegate->invoke_impl. */
2643 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2644 MonoInst *dummy_use;
2646 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2648 /* Make a call to delegate->invoke_impl */
2649 call->inst.inst_basereg = this_reg;
2650 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2651 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2653 /* We must emit a dummy use here because the delegate trampoline will
2654 replace the 'this' argument with the delegate target making this activation
2655 no longer a root for the delegate.
2656 This is an issue for delegates that target collectible code such as dynamic
2657 methods of GC'able assemblies.
2659 For a test case look into #667921.
2661 FIXME: a dummy use is not the best way to do it as the local register allocator
2662 will put it on a caller save register and spill it around the call.
2663 Ideally, we would either put it on a callee save register or only do the store part.
2665 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2667 return (MonoInst*)call;
/* Non-virtual (or effectively sealed) methods can be dispatched statically. */
2670 if ((!cfg->compile_aot || enable_for_aot) &&
2671 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2672 (MONO_METHOD_IS_FINAL (method) &&
2673 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2674 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2676 * the method is not virtual, we just need to ensure this is not null
2677 * and then we can call the method directly.
2679 #ifndef DISABLE_REMOTING
2680 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2682 * The check above ensures method is not gshared, this is needed since
2683 * gshared methods can't have wrappers.
2685 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors have no real 'this' yet, so skip the null check for them. */
2689 if (!method->string_ctor)
2690 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2692 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2693 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2695 * the method is virtual, but we can statically dispatch since either
2696 * it's class or the method itself are sealed.
2697 * But first we need to ensure it's not a null reference.
2699 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2701 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* Indirect virtual call through a precomputed target address. */
2702 } else if (call_target) {
2703 vtable_reg = alloc_preg (cfg);
2704 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2706 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2707 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '&= !MONO_INST_HAS_METHOD' is logical NOT, which evaluates to 0
 * and clears ALL flags, not just this one. '&= ~MONO_INST_HAS_METHOD' looks
 * intended — confirm before changing, other code may rely on current behavior. */
2708 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* Regular vtable/IMT dispatch: load the vtable, then index into it. */
2710 vtable_reg = alloc_preg (cfg);
2711 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2712 if (mono_class_is_interface (method->klass)) {
2713 guint32 imt_slot = mono_method_get_imt_slot (method);
2714 emit_imt_argument (cfg, call, call->method, imt_arg);
2715 slot_reg = vtable_reg;
/* IMT entries live at negative offsets before the vtable proper. */
2716 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2718 slot_reg = vtable_reg;
2719 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2720 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2722 g_assert (mono_method_signature (method)->generic_param_count);
2723 emit_imt_argument (cfg, call, call->method, imt_arg);
2727 call->inst.sreg1 = slot_reg;
2728 call->inst.inst_offset = offset;
2729 call->is_virtual = TRUE;
2733 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2736 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2738 return (MonoInst*)call;
/* Convenience wrapper: emit a plain (non-tail, no imt/rgctx arg) call to METHOD. */
2742 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2744 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * All the virtual/tail/rgctx options of mono_emit_call_args are disabled here.
 */
2748 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2755 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2758 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2760 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall FUNC, going through its wrapper.
 * FUNC must have been registered; mono_find_jit_icall_by_addr resolves its info.
 */
2764 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2766 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2770 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2774 * mono_emit_abs_call:
2776 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2778 inline static MonoInst*
2779 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2780 MonoMethodSignature *sig, MonoInst **args)
2782 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2786 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch-info table; keys and values are the ji itself. */
2789 if (cfg->abs_patches == NULL)
2790 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2791 g_hash_table_insert (cfg->abs_patches, ji, ji);
2792 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later passes resolve it instead of calling it. */
2793 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG with one extra trailing int-sized parameter appended
 * (used for the hidden rgctx/extra argument).
 * NOTE(review): the result is g_malloc'ed (see FIXME below) and apparently
 * never freed by callers visible here — confirm intended ownership.
 */
2797 static MonoMethodSignature*
2798 sig_to_rgctx_sig (MonoMethodSignature *sig)
2800 // FIXME: memory allocation
2801 MonoMethodSignature *res;
2804 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2805 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2806 res->param_count = sig->param_count + 1;
2807 for (i = 0; i < sig->param_count; ++i)
2808 res->params [i] = sig->params [i];
/* The extra arg is typed as a native int. */
2809 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2813 /* Make an indirect call to FSIG passing an additional argument */
2815 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2817 MonoMethodSignature *csig;
/* Stack buffer for the common small-arity case; heap (mempool) otherwise. */
2818 MonoInst *args_buf [16];
2820 int i, pindex, tmp_reg;
2822 /* Make a call with an rgctx/extra arg */
2823 if (fsig->param_count + 2 < 16)
2826 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2829 args [pindex ++] = orig_args [0];
2830 for (i = 0; i < fsig->param_count; ++i)
2831 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument (arg_reg) as the last parameter. */
2832 tmp_reg = alloc_preg (cfg);
2833 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2834 csig = sig_to_rgctx_sig (fsig);
2835 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2838 /* Emit an indirect call to the function descriptor ADDR */
2840 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2842 int addr_reg, arg_reg;
2843 MonoInst *call_target;
/* Only valid in llvm-only mode, where indirect calls go through descriptors. */
2845 g_assert (cfg->llvm_only);
2848 * addr points to a <addr, arg> pair, load both of them, and
2849 * make a call to addr, passing arg as an extra arg.
2851 addr_reg = alloc_preg (cfg);
2852 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2853 arg_reg = alloc_preg (cfg);
2854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2856 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (without their wrapper) in this
 * compilation. Disabled for mixed LLVM mode, sdb sequence points, or when
 * explicitly turned off.
 */
2860 direct_icalls_enabled (MonoCompile *cfg)
2862 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2864 if (cfg->compile_llvm && !cfg->llvm_only)
2867 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall described by INFO. When the icall cannot
 * raise and direct icalls are enabled, its wrapper is inlined instead of
 * called, avoiding the wrapper call overhead.
 */
2873 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2876 * Call the jit icall without a wrapper if possible.
2877 * The wrapper is needed for the following reasons:
2878 * - to handle exceptions thrown using mono_raise_exceptions () from the
2879 * icall function. The EH code needs the lmf frame pushed by the
2880 * wrapper to be able to unwind back to managed code.
2881 * - to be able to do stack walks for asynchronously suspended
2882 * threads when debugging.
2884 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and publish the wrapper; the barrier orders the store. */
2888 if (!info->wrapper_method) {
2889 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2890 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2892 mono_memory_barrier ();
2896 * Inline the wrapper method, which is basically a call to the C icall, and
2897 * an exception check.
2899 costs = inline_method (cfg, info->wrapper_method, NULL,
2900 args, NULL, il_offset, TRUE);
2901 g_assert (costs > 0);
2902 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: normal call through the icall wrapper. */
2906 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when native code (or
 * LLVM) may leave the upper bits of sub-register-sized integer returns
 * uninitialized. Emits a sign/zero extension chosen from the return type.
 */
2911 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2913 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2914 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2918 * Native code might return non register sized integers
2919 * without initializing the upper bits.
2921 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2922 case OP_LOADI1_MEMBASE:
2923 widen_op = OP_ICONV_TO_I1;
2925 case OP_LOADU1_MEMBASE:
2926 widen_op = OP_ICONV_TO_U1;
2928 case OP_LOADI2_MEMBASE:
2929 widen_op = OP_ICONV_TO_I2;
2931 case OP_LOADU2_MEMBASE:
2932 widen_op = OP_ICONV_TO_U2;
2938 if (widen_op != -1) {
2939 int dreg = alloc_preg (cfg);
2942 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original result. */
2943 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit IR which throws a MethodAccessException for CIL_METHOD accessed
 * from METHOD, via the mono_throw_method_access icall.
 */
2954 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
2956 MonoInst *args [16];
2958 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
2959 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
2961 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/* Return (and cache) the managed String.memcpy helper from corlib.
 * Aborts with g_error if corlib does not provide it. */
2965 get_memcpy_method (void)
2967 static MonoMethod *memcpy_method = NULL;
2968 if (!memcpy_method) {
2969 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2971 g_error ("Old corlib found. Install a new one");
2973 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively set a bit in WB_BITMAP (one bit per pointer-sized word,
 * relative to OFFSET) for every reference-typed field of KLASS, descending
 * into embedded valuetype fields that themselves contain references.
 * Static fields are skipped.
 */
2977 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2979 MonoClassField *field;
2980 gpointer iter = NULL;
2982 while ((field = mono_class_get_fields (klass, &iter))) {
2985 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the object header; strip it. */
2987 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2988 if (mini_type_is_reference (mono_field_get_type (field))) {
2989 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2990 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2992 MonoClass *field_class = mono_class_from_mono_type (field->type);
2993 if (field_class->has_references)
2994 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR. Picks, in
 * order of preference: a backend card-table opcode, inline card-table
 * marking, or a call to the GC's managed write barrier method. No-op when
 * write barriers are disabled for this compile.
 */
3000 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3002 int card_table_shift_bits;
3003 gpointer card_table_mask;
3005 MonoInst *dummy_use;
3006 int nursery_shift_bits;
3007 size_t nursery_size;
3009 if (!cfg->gen_write_barriers)
3012 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3014 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Preferred: single backend opcode hiding the barrier (JIT only, non-AOT). */
3016 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3019 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3020 wbarrier->sreg1 = ptr->dreg;
3021 wbarrier->sreg2 = value->dreg;
3022 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = card_table + ((ptr >> shift) & mask); *card = 1. */
3023 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3024 int offset_reg = alloc_preg (cfg);
3028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3029 if (card_table_mask)
3030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3032 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3033 * IMM's larger than 32bits.
3035 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3036 card_reg = ins->dreg;
3038 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3039 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier. */
3041 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3042 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
3045 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of KLASS from
 * iargs[1] to iargs[0] (SIZE bytes, ALIGN alignment). Falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall for copies larger than the
 * unrolling limit. Returns early (presumably FALSE — body truncated here)
 * when alignment or size make the inline path invalid.
 */
3049 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3051 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3052 unsigned need_wb = 0;
3057 /*types with references can't have alignment smaller than sizeof(void*) */
3058 if (align < SIZEOF_VOID_P)
3061 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3062 if (size > 32 * SIZEOF_VOID_P)
3065 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3067 /* We don't unroll more than 5 stores to avoid code bloat. */
3068 if (size > 5 * SIZEOF_VOID_P) {
3069 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size. */
3070 size += (SIZEOF_VOID_P - 1);
3071 size &= ~(SIZEOF_VOID_P - 1);
3073 EMIT_NEW_ICONST (cfg, iargs [2], size);
3074 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3075 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3079 destreg = iargs [0]->dreg;
3080 srcreg = iargs [1]->dreg;
3083 dest_ptr_reg = alloc_preg (cfg);
3084 tmp_reg = alloc_preg (cfg);
3087 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop; barrier each slot flagged in need_wb. */
3089 while (size >= SIZEOF_VOID_P) {
3090 MonoInst *load_inst;
3091 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3092 load_inst->dreg = tmp_reg;
3093 load_inst->inst_basereg = srcreg;
3094 load_inst->inst_offset = offset;
3095 MONO_ADD_INS (cfg->cbb, load_inst);
3097 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3100 emit_write_barrier (cfg, iargs [0], load_inst);
3102 offset += SIZEOF_VOID_P;
3103 size -= SIZEOF_VOID_P;
3106 /*tmp += sizeof (void*)*/
3107 if (size >= SIZEOF_VOID_P) {
3108 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3109 MONO_ADD_INS (cfg->cbb, iargs [0]);
3113 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail with 4/2/1-byte moves. */
3115 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3116 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3122 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3123 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3129 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3139 * Emit code to copy a valuetype of type @klass whose address is stored in
3140 * @src->dreg to memory whose address is stored at @dest->dreg.
3143 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3145 MonoInst *iargs [4];
3148 MonoMethod *memcpy_method;
3149 MonoInst *size_ins = NULL;
3150 MonoInst *memcpy_ins = NULL;
3154 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3157 * This check breaks with spilled vars... need to handle it during verification anyway.
3158 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper come from the runtime info, not constants. */
3161 if (mini_is_gsharedvt_klass (klass)) {
3163 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3164 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3168 n = mono_class_native_size (klass, &align);
3170 n = mono_class_value_size (klass, &align);
3172 /* if native is true there should be no references in the struct */
3173 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3174 /* Avoid barriers when storing to the stack */
3175 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3176 (dest->opcode == OP_LDADDR))) {
3182 context_used = mini_class_check_context_used (cfg, klass);
3184 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3185 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3187 } else if (context_used) {
3188 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3190 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3191 if (!cfg->compile_aot)
3192 mono_class_compute_gc_descriptor (klass);
3196 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3198 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native copy): plain memcpy path. */
3203 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3204 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3205 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3210 iargs [2] = size_ins;
3212 EMIT_NEW_ICONST (cfg, iargs [2], n);
3214 memcpy_method = get_memcpy_method ();
3216 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3218 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return (and cache) the managed String.memset helper from corlib.
 * Aborts with g_error if corlib does not provide it. */
3223 get_memset_method (void)
3225 static MonoMethod *memset_method = NULL;
3226 if (!memset_method) {
3227 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3229 g_error ("Old corlib found. Install a new one");
3231 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize a valuetype of KLASS at the address in DEST.
 * gsharedvt types go through a runtime bzero helper with a runtime size;
 * small constant sizes are inlined; everything else calls String.memset.
 */
3235 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3237 MonoInst *iargs [3];
3240 MonoMethod *memset_method;
3241 MonoInst *size_ins = NULL;
3242 MonoInst *bzero_ins = NULL;
3243 static MonoMethod *bzero_method;
3245 /* FIXME: Optimize this for the case when dest is an LDADDR */
3246 mono_class_init (klass);
3247 if (mini_is_gsharedvt_klass (klass)) {
3248 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3249 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3251 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3252 g_assert (bzero_method);
3254 iargs [1] = size_ins;
/* Indirect call: bzero_ins carries the per-instantiation helper address. */
3255 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3259 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3261 n = mono_class_value_size (klass, &align);
/* Small constant-sized types: inline the memset. */
3263 if (n <= sizeof (gpointer) * 8) {
3264 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3267 memset_method = get_memset_method ();
3269 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3270 EMIT_NEW_ICONST (cfg, iargs [2], n);
3271 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3278 * Emit IR to return either the this pointer for instance method,
3279 * or the mrgctx for static methods.
3282 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3284 MonoInst *this_ins = NULL;
3286 g_assert (cfg->gshared);
/* Instance methods on reference types: the rgctx is reachable from 'this'. */
3288 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3289 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3290 !method->klass->valuetype)
3291 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-level context: use the method rgctx (mrgctx) variable. */
3293 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3294 MonoInst *mrgctx_loc, *mrgctx_var;
3296 g_assert (!this_ins);
3297 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3299 mrgctx_loc = mono_get_vtable_var (cfg);
3300 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static methods / valuetype methods: use the vtable variable. */
3303 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3304 MonoInst *vtable_loc, *vtable_var;
3306 g_assert (!this_ins);
3308 vtable_loc = mono_get_vtable_var (cfg);
3309 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the variable actually holds an mrgctx, dereference to its class vtable. */
3311 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3312 MonoInst *mrgctx_var = vtable_var;
3315 vtable_reg = alloc_preg (cfg);
3316 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3317 vtable_var->type = STACK_PTR;
/* Otherwise load the vtable out of 'this'. */
3325 vtable_reg = alloc_preg (cfg);
3326 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry descriptor for METHOD: the patch
 * (PATCH_TYPE + PATCH_DATA) identifying the data, the kind of information
 * wanted (INFO_TYPE), and whether it lives in the method rgctx (IN_MRGCTX).
 */
3331 static MonoJumpInfoRgctxEntry *
3332 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3334 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3335 res->method = method;
3336 res->in_mrgctx = in_mrgctx;
3337 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3338 res->data->type = patch_type;
3339 res->data->data.target = patch_data;
3340 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch ENTRY from the runtime generic context RGCTX.
 * One path calls the fill icalls directly (slot not known at compile time);
 * the other emits an inline fast path that walks the rgctx slot arrays and
 * falls back to the fill icalls only when a slot is still NULL.
 */
3345 static inline MonoInst*
3346 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3348 MonoInst *args [16];
3351 // FIXME: No fastpath since the slot is not a compile time constant
3353 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3354 if (entry->in_mrgctx)
3355 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3357 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3361 * FIXME: This can be called during decompose, which is a problem since it creates
3363 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3365 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3367 MonoBasicBlock *is_null_bb, *end_bb;
3368 MonoInst *res, *ins, *call;
3371 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the slot into (mrgctx?, index) and find its array depth. */
3373 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3374 index = MONO_RGCTX_SLOT_INDEX (slot);
3376 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3377 for (depth = 0; ; ++depth) {
3378 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3380 if (index < size - 1)
3385 NEW_BBLOCK (cfg, end_bb);
3386 NEW_BBLOCK (cfg, is_null_bb);
3389 rgctx_reg = rgctx->dreg;
3391 rgctx_reg = alloc_preg (cfg);
3393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3394 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3395 NEW_BBLOCK (cfg, is_null_bb);
3397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of slot arrays, bailing to the slow path on any NULL. */
3401 for (i = 0; i < depth; ++i) {
3402 int array_reg = alloc_preg (cfg);
3404 /* load ptr to next array */
3405 if (mrgctx && i == 0)
3406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3409 rgctx_reg = array_reg;
3410 /* is the ptr null? */
3411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3412 /* if yes, jump to actual trampoline */
3413 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3417 val_reg = alloc_preg (cfg);
3418 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3419 /* is the slot null? */
3420 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3421 /* if yes, jump to actual trampoline */
3422 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path hit: move the slot value into the result register. */
3425 res_reg = alloc_preg (cfg);
3426 MONO_INST_NEW (cfg, ins, OP_MOVE);
3427 ins->dreg = res_reg;
3428 ins->sreg1 = val_reg;
3429 MONO_ADD_INS (cfg->cbb, ins);
3431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot through the runtime icall, then merge. */
3434 MONO_START_BB (cfg, is_null_bb);
3436 EMIT_NEW_ICONST (cfg, args [1], index);
3438 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3440 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3441 MONO_INST_NEW (cfg, ins, OP_MOVE);
3442 ins->dreg = res_reg;
3443 ins->sreg1 = call->dreg;
3444 MONO_ADD_INS (cfg->cbb, ins);
3445 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3447 MONO_START_BB (cfg, end_bb);
3456 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3459 static inline MonoInst*
3460 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Either inline the fetch, or call the lazy-fetch trampoline via an abs patch. */
3463 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3465 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to load RGCTX_TYPE information about KLASS from the rgctx. */
3469 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3470 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3472 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3473 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3475 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load RGCTX_TYPE information about signature SIG from the rgctx. */
3479 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3480 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3482 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3483 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3485 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load RGCTX_TYPE information for a gsharedvt call described by
 * (SIG, CMETHOD) from the rgctx. */
3489 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3490 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3492 MonoJumpInfoGSharedVtCall *call_info;
3493 MonoJumpInfoRgctxEntry *entry;
3496 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3497 call_info->sig = sig;
3498 call_info->method = cmethod;
3500 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3501 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3503 return emit_rgctx_fetch (cfg, rgctx, entry);
3507 * emit_get_rgctx_virt_method:
3509 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3512 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3513 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3515 MonoJumpInfoVirtMethod *info;
3516 MonoJumpInfoRgctxEntry *entry;
3519 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3520 info->klass = klass;
3521 info->method = virt_method;
3523 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3524 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3526 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the gsharedvt info descriptor for CMETHOD from the rgctx. */
3530 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3531 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3533 MonoJumpInfoRgctxEntry *entry;
3536 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3537 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3539 return emit_rgctx_fetch (cfg, rgctx, entry);
3543 * emit_get_rgctx_method:
3545 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3546 * normal constants, else emit a load from the rgctx.
3549 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3550 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3552 if (!context_used) {
3555 switch (rgctx_type) {
3556 case MONO_RGCTX_INFO_METHOD:
3557 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3559 case MONO_RGCTX_INFO_METHOD_RGCTX:
3560 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two info kinds above can be emitted as constants. */
3563 g_assert_not_reached ();
3566 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3567 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3569 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load RGCTX_TYPE information about FIELD from the rgctx. */
3574 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3575 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3577 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3578 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3580 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in this method's
 * gsharedvt info template, reusing an existing matching entry when possible
 * (except for LOCAL_OFFSET entries, which are never shared) and growing the
 * entries array on demand.
 */
3584 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3586 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3587 MonoRuntimeGenericContextInfoTemplate *template_;
3592 for (i = 0; i < info->num_entries; ++i) {
3593 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3595 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the mempool-backed array (doubling; 16 initially). */
3599 if (info->num_entries == info->count_entries) {
3600 MonoRuntimeGenericContextInfoTemplate *new_entries;
3601 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3603 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3605 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3606 info->entries = new_entries;
3607 info->count_entries = new_count_entries;
3610 idx = info->num_entries;
3611 template_ = &info->entries [idx];
3612 template_->info_type = rgctx_type;
3613 template_->data = data;
3615 info->num_entries ++;
3621 * emit_get_gsharedvt_info:
3623 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3626 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3631 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3632 /* Load info->entries [idx] */
3633 dreg = alloc_preg (cfg);
3634 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by a class's byval type. */
3640 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3642 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3646 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR to run the static constructor / class initialization for KLASS
 * if it has not run yet. Uses a dedicated opcode when the backend provides
 * one, otherwise an inline vtable->initialized check guarding an icall.
 */
3649 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3651 MonoInst *vtable_arg;
3654 context_used = mini_class_check_context_used (cfg, klass);
/* The vtable is either fetched from the rgctx (shared code) or embedded. */
3657 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3658 klass, MONO_RGCTX_INFO_VTABLE);
3660 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3664 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3667 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3671 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3672 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3674 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3675 ins->sreg1 = vtable_arg->dreg;
3676 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: if (!vtable->initialized) mono_generic_class_init (vtable); */
3679 MonoBasicBlock *inited_bb;
3680 MonoInst *args [16];
3682 inited_reg = alloc_ireg (cfg);
3684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3686 NEW_BBLOCK (cfg, inited_bb);
3688 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3691 args [0] = vtable_arg;
3692 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3694 MONO_START_BB (cfg, inited_bb);
/* Emit a debugger sequence point at IL offset (ip - header->code).
 * Only done for the outermost method (cfg->method == method) and when seq points are enabled. */
3699 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3703 if (cfg->gen_seq_points && cfg->method == method) {
3704 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Mark seq points where the evaluation stack is not empty, so the debugger can treat them specially. */
3706 ins->flags |= MONO_INST_NONEMPTY_STACK;
3707 MONO_ADD_INS (cfg->cbb, ins);
/* When --debug=casts is enabled, record the source and destination classes of a cast
 * into the thread's MonoJitTlsData so a failing cast can produce a detailed message.
 * NOTE(review): sampled view; some lines (e.g. the null_check guard) are elided. */
3712 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3714 if (mini_get_debug_options ()->better_cast_details) {
3715 int vtable_reg = alloc_preg (cfg);
3716 int klass_reg = alloc_preg (cfg);
3717 MonoBasicBlock *is_null_bb = NULL;
3719 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely when the object is null. */
3722 NEW_BBLOCK (cfg, is_null_bb);
3724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3728 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): the message ends in "\n." — the trailing '.' after the newline looks like a typo; fix would be a string change, out of scope here. */
3730 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* from-class = obj->vtable->klass, stored into jit_tls->class_cast_from. */
3734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* to-class: from the rgctx when generic-shared, otherwise a class constant. */
3739 context_used = mini_class_check_context_used (cfg, klass);
3741 MonoInst *class_ins;
3743 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3744 to_klass_reg = class_ins->dreg;
3746 to_klass_reg = alloc_preg (cfg);
3747 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3752 MONO_START_BB (cfg, is_null_bb);
/* Clear the saved cast details after a successful cast (pairs with mini_save_cast_details). */
3757 mini_reset_cast_details (MonoCompile *cfg)
3759 /* Reset the variables holding the cast details */
3760 if (mini_get_debug_options ()->better_cast_details) {
3761 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3762 /* It is enough to reset the from field */
3763 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3768 * On return the caller must check @array_class for load errors
/* Emit a runtime check that obj's vtable matches array_class, throwing
 * ArrayTypeMismatchException on mismatch (used for covariant array stores).
 * NOTE(review): sampled view; some lines are elided. */
3771 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3773 int vtable_reg = alloc_preg (cfg);
3776 context_used = mini_class_check_context_used (cfg, array_class);
3778 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* _FAULT variant: the load also acts as the null check on obj. */
3780 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare classes via a runtime-resolved class constant. */
3782 if (cfg->opt & MONO_OPT_SHARED) {
3783 int class_reg = alloc_preg (cfg);
3786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3787 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3788 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
/* Generic sharing: fetch the expected vtable from the rgctx and compare vtables. */
3789 } else if (context_used) {
3790 MonoInst *vtable_ins;
3792 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3793 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* AOT: vtable constant must go through a patch-able register. */
3795 if (cfg->compile_aot) {
3799 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3801 vt_reg = alloc_preg (cfg);
3802 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3803 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: the vtable pointer can be compared as an immediate. */
3806 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3812 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3814 mini_reset_cast_details (cfg);
3818 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3819 * generic code is generated.
/* Emits a call to Nullable<T>.Unbox(object) and returns the call instruction.
 * NOTE(review): sampled view; some lines are elided. */
3822 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3824 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3827 MonoInst *rgctx, *addr;
3829 /* FIXME: What if the class is shared? We might not
3830 have to get the address of the method from the
/* Shared-generic: resolve the method's code address through the rgctx and call indirectly. */
3832 addr = emit_get_rgctx_method (cfg, context_used, method,
3833 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3834 if (cfg->llvm_only) {
/* llvm-only mode records the signature so the calli can be resolved later. */
3835 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3836 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3838 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3840 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call, possibly passing the vtable as an extra argument. */
3843 gboolean pass_vtable, pass_mrgctx;
3844 MonoInst *rgctx_arg = NULL;
3846 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3847 g_assert (!pass_mrgctx);
3850 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3853 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3856 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Emit the IR for CIL 'unbox': type-check the boxed object against klass, then
 * return the address of the payload (obj + sizeof(MonoObject)).
 * NOTE(review): sampled view; the non-shared branch (unboxcast) lines are elided. */
3861 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3865 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3866 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3867 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3868 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3870 obj_reg = sp [0]->dreg;
/* _FAULT variant doubles as the null check on the boxed object. */
3871 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3872 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3874 /* FIXME: generics */
3875 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type: rank must be 0. */
3878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3879 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3882 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generic: compare element classes using the rgctx-resolved element class. */
3885 MonoInst *element_class;
3887 /* This assertion is from the unboxcast insn */
3888 g_assert (klass->rank == 0);
3890 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3891 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3893 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3894 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: direct class check with better-cast-details bookkeeping around it. */
3896 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3897 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3898 mini_reset_cast_details (cfg);
/* Result: managed pointer to the unboxed payload, just past the object header. */
3901 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3902 MONO_ADD_INS (cfg->cbb, add);
3903 add->type = STACK_MP;
/* Unbox for gsharedvt: at JIT time we don't know whether T is instantiated as a
 * reference type, a vtype, or a Nullable, so branch at runtime on the box-type
 * info and produce an address usable for a gsharedvt load.
 * NOTE(review): sampled view; some lines are elided. */
3910 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3912 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3913 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3917 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime type check: castclass_unbox throws on mismatch, returns obj. */
3923 args [1] = klass_inst;
3926 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3928 NEW_BBLOCK (cfg, is_ref_bb);
3929 NEW_BBLOCK (cfg, is_nullable_bb);
3930 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the per-instantiation box type (ref / nullable / plain vtype). */
3931 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3932 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3938 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3939 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: address is simply past the object header. */
3943 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3944 MONO_ADD_INS (cfg->cbb, addr);
3946 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3949 MONO_START_BB (cfg, is_ref_bb);
3951 /* Save the ref to a temporary */
3952 dreg = alloc_ireg (cfg);
3953 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
/* All branches must define the same addr_reg so the load after end_bb works. */
3954 addr->dreg = addr_reg;
3955 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3959 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built one-arg signature,
 * since the concrete method cannot be constructed at JIT time. */
3962 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3963 MonoInst *unbox_call;
3964 MonoMethodSignature *unbox_sig;
3966 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3967 unbox_sig->ret = &klass->byval_arg;
3968 unbox_sig->param_count = 1;
3969 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3972 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3974 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3976 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3977 addr->dreg = addr_reg;
3980 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3983 MONO_START_BB (cfg, end_bb);
/* Load the value through the address computed by whichever branch ran. */
3986 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3992 * Returns NULL and set the cfg exception on error.
/* Emit IR that allocates an instance of klass, choosing between the managed
 * allocator, specialized icalls, and the generic allocation function.
 * NOTE(review): sampled view; some lines (branch structure, returns) are elided. */
3995 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3997 MonoInst *iargs [2];
/* --- shared-generic (context_used) path --- */
4002 MonoRgctxInfoType rgctx_info;
4003 MonoInst *iargs [2];
/* For gsharedvt klasses the instance size is only known at runtime. */
4004 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4006 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4008 if (cfg->opt & MONO_OPT_SHARED)
4009 rgctx_info = MONO_RGCTX_INFO_KLASS;
4011 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4012 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4014 if (cfg->opt & MONO_OPT_SHARED) {
4015 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4017 alloc_ftn = ves_icall_object_new;
4020 alloc_ftn = ves_icall_object_new_specific;
4023 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4024 if (known_instance_size) {
4025 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header. */
4026 if (size < sizeof (MonoObject))
4027 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4029 EMIT_NEW_ICONST (cfg, iargs [1], size);
4031 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4034 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4037 if (cfg->opt & MONO_OPT_SHARED) {
4038 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4039 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4041 alloc_ftn = ves_icall_object_new;
4042 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4043 /* This happens often in argument checking code, eg. throw new FooException... */
4044 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4045 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4046 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4048 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4049 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException via the cfg. */
4053 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4054 cfg->exception_ptr = klass;
4058 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4060 if (managed_alloc) {
4061 int size = mono_class_instance_size (klass);
4062 if (size < sizeof (MonoObject))
4063 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4065 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4066 EMIT_NEW_ICONST (cfg, iargs [1], size);
4067 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4069 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocation fn wants the instance size in pointer-words as arg 0. */
4071 guint32 lw = vtable->klass->instance_size;
4072 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4073 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4074 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4077 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4081 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4085 * Returns NULL and set the cfg exception on error.
/* Emit IR for CIL 'box': Nullable<T> goes through Nullable.Box, gsharedvt
 * dispatches at runtime on the box type, and the plain case is alloc + store.
 * NOTE(review): sampled view; some lines are elided. */
4088 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4090 MonoInst *alloc, *ins;
4092 if (mono_class_is_nullable (klass)) {
4093 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4096 if (cfg->llvm_only && cfg->gsharedvt) {
4097 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4098 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4099 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4101 /* FIXME: What if the class is shared? We might not
4102 have to get the method address from the RGCTX. */
4103 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4104 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4105 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4107 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable: direct call, optionally passing the vtable. */
4110 gboolean pass_vtable, pass_mrgctx;
4111 MonoInst *rgctx_arg = NULL;
4113 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4114 g_assert (!pass_mrgctx);
4117 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4120 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4123 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: T's instantiation is unknown — branch on ref / nullable / vtype. */
4127 if (mini_is_gsharedvt_klass (klass)) {
4128 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4129 MonoInst *res, *is_ref, *src_var, *addr;
4132 dreg = alloc_ireg (cfg);
4134 NEW_BBLOCK (cfg, is_ref_bb);
4135 NEW_BBLOCK (cfg, is_nullable_bb);
4136 NEW_BBLOCK (cfg, end_bb);
4137 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4139 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value past the object header. */
4145 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4148 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4149 ins->opcode = OP_STOREV_MEMBASE;
4151 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4152 res->type = STACK_OBJ;
4154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4157 MONO_START_BB (cfg, is_ref_bb);
4159 /* val is a vtype, so has to load the value manually */
4160 src_var = get_vreg_to_inst (cfg, val->dreg);
4162 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4163 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4168 MONO_START_BB (cfg, is_nullable_bb);
4171 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4172 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4174 MonoMethodSignature *box_sig;
4177 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4178 * construct that method at JIT time, so have to do things by hand.
4180 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4181 box_sig->ret = &mono_defaults.object_class->byval_arg;
4182 box_sig->param_count = 1;
4183 box_sig->params [0] = &klass->byval_arg;
4186 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4188 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4189 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4190 res->type = STACK_OBJ;
4194 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4196 MONO_START_BB (cfg, end_bb);
/* Plain (non-nullable, non-gsharedvt) case: alloc + store value into the box. */
4200 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4204 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls never raise; read without locking after publication. */
4209 static GHashTable* direct_icall_type_hash;
/* Decide whether an icall can be invoked directly (no wrapper). */
4212 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4214 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4215 if (!direct_icalls_enabled (cfg))
4219 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4220 * Whitelist a few icalls for now.
4222 if (!direct_icall_type_hash) {
4223 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4225 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4226 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4227 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4228 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible. */
4229 mono_memory_barrier ();
4230 direct_icall_type_hash = h;
4233 if (cmethod->klass == mono_defaults.math_class)
4235 /* No locking needed */
4236 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True for methods whose semantics depend on their caller (e.g. Type.GetType uses caller context),
 * so they must be able to walk the stack and cannot be treated as opaque calls. */
4242 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4244 if (cmethod->klass == mono_defaults.systemtype_class) {
4245 if (!strcmp (cmethod->name, "GetType"))
/* Inline Enum.HasFlag: load the enum value, AND with the flag, compare the
 * result back against the flag, and produce an I4 boolean.
 * NOTE(review): sampled view; the switch cases selecting is_i4 are elided. */
4251 static G_GNUC_UNUSED MonoInst*
4252 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4254 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4255 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4258 switch (enum_type->type) {
4261 #if SIZEOF_REGISTER == 8
4273 MonoInst *load, *and_, *cmp, *ceq;
4274 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4275 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4276 int dest_reg = alloc_ireg (cfg);
4278 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4279 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
/* HasFlag == ((value & flag) == flag) */
4280 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4281 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4283 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends that lack them. */
4286 load = mono_decompose_opcode (cfg, load);
4287 and_ = mono_decompose_opcode (cfg, and_);
4288 cmp = mono_decompose_opcode (cfg, cmp);
4289 ceq = mono_decompose_opcode (cfg, ceq);
4297 * Returns NULL and set the cfg exception on error.
/* Inline the work of mono_delegate_ctor: allocate the delegate object and fill
 * in target, method, cached code slot, and invoke_impl (trampoline or llvm-only init).
 * NOTE(review): sampled view; some lines are elided. */
4299 static G_GNUC_UNUSED MonoInst*
4300 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4304 gpointer trampoline;
4305 MonoInst *obj, *method_ins, *tramp_ins;
4309 if (virtual_ && !cfg->llvm_only) {
4310 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4313 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4317 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4321 /* Inline the contents of mono_delegate_ctor */
4323 /* Set target field */
4324 /* Optimize away setting of NULL target */
4325 if (!MONO_INS_IS_PCONST_NULL (target)) {
4326 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* A ref store into a fresh object still needs a write barrier for moving GCs. */
4327 if (cfg->gen_write_barriers) {
4328 dreg = alloc_preg (cfg);
4329 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4330 emit_write_barrier (cfg, ptr, target);
4334 /* Set method field */
4335 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4336 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4339 * To avoid looking up the compiled code belonging to the target method
4340 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4341 * store it, and we fill it after the method has been compiled.
4343 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4344 MonoInst *code_slot_ins;
4347 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate the per-domain code slot under the domain lock; reuse an existing one. */
4349 domain = mono_domain_get ();
4350 mono_domain_lock (domain);
4351 if (!domain_jit_info (domain)->method_code_hash)
4352 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4353 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4355 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4356 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4358 mono_domain_unlock (domain);
4360 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines — initialize the delegate via an init icall instead. */
4365 if (cfg->llvm_only) {
4366 MonoInst *args [16];
4371 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4372 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4375 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* JIT/AOT: materialize the delegate trampoline (AOT uses a patch-info constant). */
4381 if (cfg->compile_aot) {
4382 MonoDelegateClassMethodPair *del_tramp;
4384 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4385 del_tramp->klass = klass;
4386 del_tramp->method = context_used ? NULL : method;
4387 del_tramp->is_virtual = virtual_;
4388 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4391 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4393 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4394 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4397 /* Set invoke_impl field */
4399 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual case: tramp_ins points at a MonoDelegateTrampInfo; load its fields. */
4401 dreg = alloc_preg (cfg);
4402 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4403 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4405 dreg = alloc_preg (cfg);
4406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4410 dreg = alloc_preg (cfg);
4411 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4412 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4414 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/* Emit a call to the vararg mono_array_new_va icall wrapper for a multi-dim
 * 'newarr'/'newobj' of the given rank; sp holds the dimension arguments. */
4420 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4422 MonoJitICallInfo *info;
4424 /* Need to register the icall so it gets an icall wrapper */
4425 info = mono_get_array_new_va_icall (rank);
4427 cfg->flags |= MONO_CFG_HAS_VARARGS;
4429 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM can't compile vararg callsites here, so disable it for this method. */
4430 cfg->exception_message = g_strdup ("array-new");
4431 cfg->disable_llvm = TRUE;
4433 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4434 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4438 * handle_constrained_gsharedvt_call:
4440 * Handle constrained calls where the receiver is a gsharedvt type.
4441 * Return the instruction representing the call. Set the cfg exception on failure.
/* NOTE(review): sampled view; some lines are elided. */
4444 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4445 gboolean *ref_emit_widen)
4447 MonoInst *ins = NULL;
4448 gboolean emit_widen = *ref_emit_widen;
4451 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4452 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4453 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of shapes is supported: Object/interface methods, simple
 * return types, and at most one by-ref/ref-like/gsharedvt parameter. */
4455 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4456 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4457 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4458 MonoInst *args [16];
4461 * This case handles calls to
4462 * - object:ToString()/Equals()/GetHashCode(),
4463 * - System.IComparable<T>:CompareTo()
4464 * - System.IEquatable<T>:Equals ()
4465 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] = target method (rgctx-resolved if generic), args[2] = constrained class. */
4469 if (mono_method_check_context_used (cmethod))
4470 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4472 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4473 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4475 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4476 if (fsig->hasthis && fsig->param_count) {
4477 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4478 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4479 ins->dreg = alloc_preg (cfg);
4480 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4481 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt parameter: also pass whether the icall must deref the arg slot. */
4484 if (mini_is_gsharedvt_type (fsig->params [0])) {
4485 int addr_reg, deref_arg_reg;
4487 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4488 deref_arg_reg = alloc_preg (cfg);
4489 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4490 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4492 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4493 addr_reg = ins->dreg;
4494 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4496 EMIT_NEW_ICONST (cfg, args [3], 0);
4497 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4500 EMIT_NEW_ICONST (cfg, args [3], 0);
4501 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall does the ref/vtype dispatch and the actual invocation. */
4503 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox value-typed returns. */
4506 if (mini_is_gsharedvt_type (fsig->ret)) {
4507 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4508 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4512 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4513 MONO_ADD_INS (cfg->cbb, add);
4515 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4516 MONO_ADD_INS (cfg->cbb, ins);
4517 /* ins represents the call result */
/* Any other shape is unsupported: bail out to the non-gsharedvt path. */
4520 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4523 *ref_emit_widen = emit_widen;
/* Materialize the GOT address into cfg->got_var at method entry (once per method),
 * and add a dummy use in the exit block so liveness keeps the variable alive. */
4532 mono_emit_load_got_addr (MonoCompile *cfg)
4534 MonoInst *getaddr, *dummy_use;
4536 if (!cfg->got_var || cfg->got_var_allocated)
4539 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4540 getaddr->cil_code = cfg->header->code;
4541 getaddr->dreg = cfg->got_var->dreg;
4543 /* Add it to the start of the first bblock */
4544 if (cfg->bb_entry->code) {
4545 getaddr->next = cfg->bb_entry->code;
4546 cfg->bb_entry->code = getaddr;
4549 MONO_ADD_INS (cfg->bb_entry, getaddr);
4551 cfg->got_var_allocated = TRUE;
4554 * Add a dummy use to keep the got_var alive, since real uses might
4555 * only be generated by the back ends.
4556 * Add it to end_bblock, so the variable's lifetime covers the whole
4558 * It would be better to make the usage of the got var explicit in all
4559 * cases when the backend needs it (i.e. calls, throw etc.), so this
4560 * wouldn't be needed.
4562 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4563 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, overridable via the MONO_INLINELIMIT env var; initialized lazily. */
4566 static int inline_limit;
4567 static gboolean inline_limit_inited;
/* Decide whether 'method' may be inlined into the current compile.
 * NOTE(review): sampled view; return statements and some guards are elided. */
4570 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4572 MonoMethodHeaderSummary header;
4574 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4575 MonoMethodSignature *sig = mono_method_signature (method);
4579 if (cfg->disable_inline)
/* Bound recursion depth of nested inlining. */
4584 if (cfg->inline_depth > 10)
4587 if (!mono_method_get_header_summary (method, &header))
4590 /*runtime, icall and pinvoke are checked by summary call*/
4591 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4592 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4593 (mono_class_is_marshalbyref (method->klass)) ||
4597 /* also consider num_locals? */
4598 /* Do the size check early to avoid creating vtables */
4599 if (!inline_limit_inited) {
4600 if (g_getenv ("MONO_INLINELIMIT"))
4601 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4603 inline_limit = INLINE_LENGTH_LIMIT;
4604 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
4606 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4610 * if we can initialize the class of the method right away, we do,
4611 * otherwise we don't allow inlining if the class needs initialization,
4612 * since it would mean inserting a call to mono_runtime_class_init()
4613 * inside the inlined code
4615 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4618 if (!(cfg->opt & MONO_OPT_SHARED)) {
4619 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4620 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4621 if (method->klass->has_cctor) {
4622 vtable = mono_class_vtable (cfg->domain, method->klass);
4625 if (!cfg->compile_aot) {
4627 if (!mono_runtime_class_init_full (vtable, &error)) {
4628 mono_error_cleanup (&error);
4633 } else if (mono_class_is_before_field_init (method->klass)) {
4634 if (cfg->run_cctors && method->klass->has_cctor) {
4635 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4636 if (!method->klass->runtime_info)
4637 /* No vtable created yet */
4639 vtable = mono_class_vtable (cfg->domain, method->klass);
4642 /* This makes so that inline cannot trigger */
4643 /* .cctors: too many apps depend on them */
4644 /* running with a specific order... */
4645 if (! vtable->initialized)
4648 if (!mono_runtime_class_init_full (vtable, &error)) {
4649 mono_error_cleanup (&error);
4653 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4654 if (!method->klass->runtime_info)
4655 /* No vtable created yet */
4657 vtable = mono_class_vtable (cfg->domain, method->klass);
4660 if (!vtable->initialized)
4665 * If we're compiling for shared code
4666 * the cctor will need to be run at aot method load time, for example,
4667 * or at the end of the compilation of the inlining method.
4669 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft-float targets cannot inline methods touching R4 values. */
4673 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4674 if (mono_arch_is_soft_float ()) {
4676 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4678 for (i = 0; i < sig->param_count; ++i)
4679 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Methods explicitly excluded from inlining for this compile. */
4684 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access emitted into METHOD must trigger the
 * class constructor (.cctor) of KLASS at run time.
 *
 * NOTE(review): this chunk is a lossy line sample — the return statements and
 * closing braces between the sampled conditions are missing from view, so the
 * exact TRUE/FALSE results per branch cannot be confirmed here.
 */
4691 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* When JITting (not AOT), an already-initialized vtable means no trigger is needed. */
4693 if (!cfg->compile_aot) {
4695 if (vtable->initialized)
/* BeforeFieldInit classes get lazier treatment; special-case the root method. */
4699 if (mono_class_is_before_field_init (klass)) {
4700 if (cfg->method == method)
4704 if (!mono_class_needs_cctor_run (klass, method))
4707 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4708 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of single-dimensional
 * array ARR of element type KLASS; returns the address instruction
 * (type STACK_MP, klass = element class).  BCHECK requests a bounds check.
 *
 * NOTE(review): lossy line sample — the gsharedvt early branch at 4722, the
 * #else/#endif lines of the SIZEOF_REGISTER block, and the early return of
 * the x86 LEA fast path are missing between the sampled lines.
 */
4715 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4719 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4722 if (mini_is_gsharedvt_variable_klass (klass)) {
4725 mono_class_init (klass);
4726 size = mono_class_array_element_size (klass);
4729 mult_reg = alloc_preg (cfg);
4730 array_reg = arr->dreg;
4731 index_reg = index->dreg;
4733 #if SIZEOF_REGISTER == 8
4734 /* The array reg is 64 bits but the index reg is only 32 */
4735 if (COMPILE_LLVM (cfg)) {
4737 index2_reg = index_reg;
/* Non-LLVM path: sign-extend the 32-bit index to pointer width. */
4739 index2_reg = alloc_preg (cfg);
4740 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: truncate an I8 index down to I4. */
4743 if (index->type == STACK_I8) {
4744 index2_reg = alloc_preg (cfg);
4745 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4747 index2_reg = index_reg;
4752 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold the scale and vector offset into one LEA on x86/amd64. */
4754 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4755 if (size == 1 || size == 2 || size == 4 || size == 8) {
4756 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4758 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4759 ins->klass = mono_class_get_element_class (klass);
4760 ins->type = STACK_MP;
4766 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at run time, fetch it via RGCTX. */
4769 MonoInst *rgctx_ins;
4772 g_assert (cfg->gshared);
4773 context_used = mini_class_check_context_used (cfg, klass);
4774 g_assert (context_used);
4775 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4776 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
4780 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4781 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4782 ins->klass = mono_class_get_element_class (klass);
4783 ins->type = STACK_MP;
4784 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a 2-dimensional array with
 * per-dimension lower bounds: addr = arr->vector +
 * ((idx1 - lb1) * len2 + (idx2 - lb2)) * element_size.
 * Each dimension is range-checked against its MonoArrayBounds entry.
 *
 * NOTE(review): lossy line sample — the `index1 = tmpreg;` / `index2 = tmpreg;`
 * assignments after the SEXT instructions and the closing #endif appear to be
 * missing between sampled lines; confirm against the full source.
 */
4790 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4792 int bounds_reg = alloc_preg (cfg);
4793 int add_reg = alloc_ireg_mp (cfg);
4794 int mult_reg = alloc_preg (cfg);
4795 int mult2_reg = alloc_preg (cfg);
4796 int low1_reg = alloc_preg (cfg);
4797 int low2_reg = alloc_preg (cfg);
4798 int high1_reg = alloc_preg (cfg);
4799 int high2_reg = alloc_preg (cfg);
4800 int realidx1_reg = alloc_preg (cfg);
4801 int realidx2_reg = alloc_preg (cfg);
4802 int sum_reg = alloc_preg (cfg);
4803 int index1, index2, tmpreg;
4807 mono_class_init (klass);
4808 size = mono_class_array_element_size (klass);
4810 index1 = index_ins1->dreg;
4811 index2 = index_ins2->dreg;
4813 #if SIZEOF_REGISTER == 8
4814 /* The array reg is 64 bits but the index reg is only 32 */
4815 if (COMPILE_LLVM (cfg)) {
4818 tmpreg = alloc_preg (cfg);
4819 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4821 tmpreg = alloc_preg (cfg);
4822 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4826 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4830 /* range checking */
4831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4832 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1, must be < length1 (unsigned). */
4834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4835 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4836 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4837 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4838 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4839 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4840 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry lives sizeof (MonoArrayBounds) further in. */
4842 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4843 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4844 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4845 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4846 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4847 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4848 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major flattening: (realidx1 * len2 + realidx2) * size + vector offset. */
4850 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4851 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4853 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4854 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4856 ins->type = STACK_MP;
4858 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatcher for array element-address computation (CMETHOD is the
 * Get/Set/Address accessor of an array class; SP holds the stack arguments):
 * rank 1 uses the inline 1-D path, rank 2 uses the inline 2-D path when the
 * backend has real multiply support and intrinsics are enabled, otherwise a
 * marshalling helper method is called.
 *
 * NOTE(review): lossy line sample — the `rank == 1` guard before line 4875,
 * the gsharedvt early-out body, and the final return are missing from view.
 */
4864 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4868 MonoMethod *addr_method;
4870 MonoClass *eclass = cmethod->klass->element_class;
/* A setter carries the value as its last parameter; it is not an index. */
4872 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4875 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4877 /* emit_ldelema_2 depends on OP_LMUL */
4878 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4879 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4882 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the generated Address helper for this rank/size. */
4885 element_size = mono_class_array_element_size (eclass);
4886 addr_method = mono_marshal_get_array_address (rank, element_size);
4887 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4892 static MonoBreakPolicy
4893 always_insert_breakpoint (MonoMethod *method)
4895 return MONO_BREAK_POLICY_ALWAYS;
4898 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4901 * mono_set_break_policy:
4902 * policy_callback: the new callback function
4904 * Allow embedders to decide whether to actually obey breakpoint instructions
4905 * (both break IL instructions and Debugger.Break () method calls), for example
4906 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4907 * untrusted or semi-trusted code.
4909 * @policy_callback will be called every time a break point instruction needs to
4910 * be inserted with the method argument being the method that calls Debugger.Break()
4911 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4912 * if it wants the breakpoint to not be effective in the given method.
4913 * #MONO_BREAK_POLICY_ALWAYS is the default.
4916 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4918 if (policy_callback)
4919 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
4921 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Query the installed break policy for METHOD and translate the answer
 * into a boolean.  (The misspelling "brekpoint" is the function's actual
 * name; renaming it would break its callers, so it is kept.)
 *
 * NOTE(review): lossy line sample — the return statements after each case
 * label are missing from view.
 */
4925 should_insert_brekpoint (MonoMethod *method) {
4926 switch (break_policy_func (method)) {
4927 case MONO_BREAK_POLICY_ALWAYS:
4929 case MONO_BREAK_POLICY_NEVER:
4931 case MONO_BREAK_POLICY_ON_DBG:
4932 g_warning ("mdb no longer supported");
/* Any other value is a misbehaving embedder callback. */
4935 g_warning ("Incorrect value returned from break policy callback");
4940 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = address of the value slot;
 * fsig->params [2] gives the element type.  IS_SET selects store-into-array
 * (with a write barrier for reference elements) vs. load-from-array.
 *
 * NOTE(review): lossy line sample — the if/else around the two branches and
 * the final return are missing from view.
 */
4942 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4944 MonoInst *addr, *store, *load;
4945 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4947 /* the bounds check is already done by the callers */
4948 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Store path: read the value through args [2], write it into the element. */
4950 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4951 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4952 if (mini_type_is_reference (&eklass->byval_arg))
4953 emit_write_barrier (cfg, addr, load);
/* Load path: read the element, write it through args [2]. */
4955 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4956 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Thin wrapper: TRUE when KLASS's byval type is a reference type
 * (mini_type_is_reference resolves type-var constraints as needed). */
4963 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4965 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 * Reference-typed stores with a possibly non-null value go through the
 * virtual stelemref helper (which performs the array covariance check);
 * otherwise the element address is computed and a direct store is emitted,
 * with a constant-index fast path.  SAFETY_CHECKS controls bounds checking.
 *
 * NOTE(review): lossy line sample — the iargs assignments before the helper
 * call, several closing braces and the final return are missing from view.
 */
4969 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4971 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4972 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4973 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4974 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4975 MonoInst *iargs [3];
4978 mono_class_setup_vtable (obj_array);
4979 g_assert (helper->slot);
4981 if (sp [0]->type != STACK_OBJ)
4983 if (sp [2]->type != STACK_OBJ)
/* Virtual call so the helper specialized for the runtime array type runs. */
4990 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element type: size unknown at compile time, use OP_STOREV. */
4994 if (mini_is_gsharedvt_variable_klass (klass)) {
4997 // FIXME-VT: OP_ICONST optimization
4998 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4999 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5000 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset computation at compile time. */
5001 } else if (sp [1]->opcode == OP_ICONST) {
5002 int array_reg = sp [0]->dreg;
5003 int index_reg = sp [1]->dreg;
5004 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5006 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5007 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5010 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5011 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the address, store, and barrier reference writes. */
5013 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5014 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5015 if (generic_class_is_reference_type (cfg, klass))
5016 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Intrinsic for Array.UnsafeStore/UnsafeLoad: element access without the
 * usual safety checks.  IS_SET selects store (element type from
 * fsig->params [2]) vs. load (element type from fsig->ret).
 *
 * NOTE(review): lossy line sample — the if/else around the two branches and
 * the trailing return of `ins` are missing from view.
 */
5023 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5028 eklass = mono_class_from_mono_type (fsig->params [2]);
5030 eklass = mono_class_from_mono_type (fsig->ret);
/* safety_checks == FALSE: no bounds check, no covariance check. */
5033 return emit_array_store (cfg, eklass, args, FALSE);
5035 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5036 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5042 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5045 int param_size, return_size;
5047 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5048 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5050 if (cfg->verbose_level > 3)
5051 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5053 //Don't allow mixing reference types with value types
5054 if (param_klass->valuetype != return_klass->valuetype) {
5055 if (cfg->verbose_level > 3)
5056 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5060 if (!param_klass->valuetype) {
5061 if (cfg->verbose_level > 3)
5062 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5067 if (param_klass->has_references || return_klass->has_references)
5070 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5071 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5072 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5073 if (cfg->verbose_level > 3)
5074 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5078 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5079 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5080 if (cfg->verbose_level > 3)
5081 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5085 param_size = mono_class_value_size (param_klass, &align);
5086 return_size = mono_class_value_size (return_klass, &align);
5088 //We can do it if sizes match
5089 if (param_size == return_size) {
5090 if (cfg->verbose_level > 3)
5091 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5095 //No simple way to handle struct if sizes don't match
5096 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5097 if (cfg->verbose_level > 3)
5098 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5103 * Same reg size category.
5104 * A quick note on why we don't require widening here.
5105 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5107 * Since the source value comes from a function argument, the JIT will already have
5108 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5110 if (param_size <= 4 && return_size <= 4) {
5111 if (cfg->verbose_level > 3)
5112 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Intrinsic for Array.UnsafeMov<S,R>: when S and R are bit-compatible
 * (per is_unsafe_mov_compatible), reinterpret the argument without any
 * conversion; also accepted for rank-1 arrays of compatible element types.
 *
 * NOTE(review): lossy line sample — the emitted move/return on the success
 * paths and the final fallback return are missing from view.
 */
5120 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5122 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5123 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* gsharedvt return type: size not known at JIT time, bail out. */
5125 if (mini_is_gsharedvt_variable_type (fsig->ret))
5128 //Valuetypes that are semantically equivalent or numbers than can be widened to
5129 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5132 //Arrays of valuetypes that are semantically equivalent
5133 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic IR sequence:
 * first the SIMD intrinsics (when MONO_OPT_SIMD is on and the arch supports
 * them), then the native-types intrinsics.  Returns NULL when no intrinsic
 * applies, in which case the caller emits a regular call.
 *
 * NOTE(review): lossy line sample — the return of `ins` from the SIMD branch
 * and the #endif are missing from view.
 */
5140 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5142 #ifdef MONO_ARCH_SIMD_INTRINSICS
5143 MonoInst *ins = NULL;
5145 if (cfg->opt & MONO_OPT_SIMD) {
5146 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5152 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given
 * MONO_MEMORY_BARRIER_* KIND to the current basic block.
 *
 * NOTE(review): lossy line sample — the trailing return (presumably `ins`)
 * is missing from view.
 */
5156 emit_memory_barrier (MonoCompile *cfg, int kind)
5158 MonoInst *ins = NULL;
5159 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5160 MONO_ADD_INS (cfg->cbb, ins);
5161 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend-specific intrinsics for System.Math: Sin/Cos/Sqrt/Abs(double)
 * map to dedicated R8 opcodes, and Min/Max on I4/U4/I8/U8 map to cmov-style
 * min/max opcodes when MONO_OPT_CMOV is enabled.  Returns the emitted
 * instruction or NULL when no intrinsic applies.
 *
 * NOTE(review): lossy line sample — the `opcode = OP_...` assignments inside
 * the Sin/Cos/Sqrt/Abs chain and the OP_IMIN/OP_LMIN/OP_IMAX/OP_LMAX
 * assignments are missing between the sampled condition lines.
 */
5167 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5169 MonoInst *ins = NULL;
5172 /* The LLVM backend supports these intrinsics */
5173 if (cmethod->klass == mono_defaults.math_class) {
5174 if (strcmp (cmethod->name, "Sin") == 0) {
5176 } else if (strcmp (cmethod->name, "Cos") == 0) {
5178 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5180 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one R8 source, fresh R8 destination. */
5184 if (opcode && fsig->param_count == 1) {
5185 MONO_INST_NEW (cfg, ins, opcode);
5186 ins->type = STACK_R8;
5187 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5188 ins->sreg1 = args [0]->dreg;
5189 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only pay off when the arch can emit conditional moves. */
5193 if (cfg->opt & MONO_OPT_CMOV) {
5194 if (strcmp (cmethod->name, "Min") == 0) {
5195 if (fsig->params [0]->type == MONO_TYPE_I4)
5197 if (fsig->params [0]->type == MONO_TYPE_U4)
5198 opcode = OP_IMIN_UN;
5199 else if (fsig->params [0]->type == MONO_TYPE_I8)
5201 else if (fsig->params [0]->type == MONO_TYPE_U8)
5202 opcode = OP_LMIN_UN;
5203 } else if (strcmp (cmethod->name, "Max") == 0) {
5204 if (fsig->params [0]->type == MONO_TYPE_I4)
5206 if (fsig->params [0]->type == MONO_TYPE_U4)
5207 opcode = OP_IMAX_UN;
5208 else if (fsig->params [0]->type == MONO_TYPE_I8)
5210 else if (fsig->params [0]->type == MONO_TYPE_U8)
5211 opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the operand width. */
5215 if (opcode && fsig->param_count == 2) {
5216 MONO_INST_NEW (cfg, ins, opcode);
5217 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5218 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5219 ins->sreg1 = args [0]->dreg;
5220 ins->sreg2 = args [1]->dreg;
5221 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that remain valid under generic sharing: the Array.UnsafeStore/
 * UnsafeLoad/UnsafeMov helpers.  Returns the emitted instruction or NULL.
 *
 * NOTE(review): lossy line sample — the trailing `return NULL;` and closing
 * brace are missing from view.
 */
5229 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5231 if (cmethod->klass == mono_defaults.array_class) {
5232 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5233 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5234 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5235 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5236 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5237 return emit_array_unsafe_mov (cfg, fsig, args);
5244 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5246 MonoInst *ins = NULL;
5248 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5250 if (cmethod->klass == mono_defaults.string_class) {
5251 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5252 int dreg = alloc_ireg (cfg);
5253 int index_reg = alloc_preg (cfg);
5254 int add_reg = alloc_preg (cfg);
5256 #if SIZEOF_REGISTER == 8
5257 if (COMPILE_LLVM (cfg)) {
5258 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5260 /* The array reg is 64 bits but the index reg is only 32 */
5261 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5264 index_reg = args [1]->dreg;
5266 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5268 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5269 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5270 add_reg = ins->dreg;
5271 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5274 int mult_reg = alloc_preg (cfg);
5275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5276 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5278 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5280 type_from_op (cfg, ins, NULL, NULL);
5282 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5283 int dreg = alloc_ireg (cfg);
5284 /* Decompose later to allow more optimizations */
5285 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5286 ins->type = STACK_I4;
5287 ins->flags |= MONO_INST_FAULT;
5288 cfg->cbb->has_array_access = TRUE;
5289 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5294 } else if (cmethod->klass == mono_defaults.object_class) {
5295 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5296 int dreg = alloc_ireg_ref (cfg);
5297 int vt_reg = alloc_preg (cfg);
5298 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5299 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5300 type_from_op (cfg, ins, NULL, NULL);
5303 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5304 int dreg = alloc_ireg (cfg);
5305 int t1 = alloc_ireg (cfg);
5307 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5308 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5309 ins->type = STACK_I4;
5312 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5313 MONO_INST_NEW (cfg, ins, OP_NOP);
5314 MONO_ADD_INS (cfg->cbb, ins);
5318 } else if (cmethod->klass == mono_defaults.array_class) {
5319 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5320 return emit_array_generic_access (cfg, fsig, args, FALSE);
5321 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5322 return emit_array_generic_access (cfg, fsig, args, TRUE);
5324 #ifndef MONO_BIG_ARRAYS
5326 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5329 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5330 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5331 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5332 int dreg = alloc_ireg (cfg);
5333 int bounds_reg = alloc_ireg_mp (cfg);
5334 MonoBasicBlock *end_bb, *szarray_bb;
5335 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5337 NEW_BBLOCK (cfg, end_bb);
5338 NEW_BBLOCK (cfg, szarray_bb);
5340 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5341 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5342 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5344 /* Non-szarray case */
5346 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5347 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5349 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5350 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5352 MONO_START_BB (cfg, szarray_bb);
5355 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5356 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5358 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5359 MONO_START_BB (cfg, end_bb);
5361 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5362 ins->type = STACK_I4;
5368 if (cmethod->name [0] != 'g')
5371 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5372 int dreg = alloc_ireg (cfg);
5373 int vtable_reg = alloc_preg (cfg);
5374 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5375 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5376 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5377 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5378 type_from_op (cfg, ins, NULL, NULL);
5381 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5382 int dreg = alloc_ireg (cfg);
5384 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5385 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5386 type_from_op (cfg, ins, NULL, NULL);
5391 } else if (cmethod->klass == runtime_helpers_class) {
5392 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5393 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5397 } else if (cmethod->klass == mono_defaults.monitor_class) {
5398 gboolean is_enter = FALSE;
5399 gboolean is_v4 = FALSE;
5401 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5405 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5410 * To make async stack traces work, icalls which can block should have a wrapper.
5411 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5413 MonoBasicBlock *end_bb;
5415 NEW_BBLOCK (cfg, end_bb);
5417 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5420 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5421 MONO_START_BB (cfg, end_bb);
5424 } else if (cmethod->klass == mono_defaults.thread_class) {
5425 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5426 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5427 MONO_ADD_INS (cfg->cbb, ins);
5429 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5430 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5431 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5433 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5435 if (fsig->params [0]->type == MONO_TYPE_I1)
5436 opcode = OP_LOADI1_MEMBASE;
5437 else if (fsig->params [0]->type == MONO_TYPE_U1)
5438 opcode = OP_LOADU1_MEMBASE;
5439 else if (fsig->params [0]->type == MONO_TYPE_I2)
5440 opcode = OP_LOADI2_MEMBASE;
5441 else if (fsig->params [0]->type == MONO_TYPE_U2)
5442 opcode = OP_LOADU2_MEMBASE;
5443 else if (fsig->params [0]->type == MONO_TYPE_I4)
5444 opcode = OP_LOADI4_MEMBASE;
5445 else if (fsig->params [0]->type == MONO_TYPE_U4)
5446 opcode = OP_LOADU4_MEMBASE;
5447 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5448 opcode = OP_LOADI8_MEMBASE;
5449 else if (fsig->params [0]->type == MONO_TYPE_R4)
5450 opcode = OP_LOADR4_MEMBASE;
5451 else if (fsig->params [0]->type == MONO_TYPE_R8)
5452 opcode = OP_LOADR8_MEMBASE;
5453 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5454 opcode = OP_LOAD_MEMBASE;
5457 MONO_INST_NEW (cfg, ins, opcode);
5458 ins->inst_basereg = args [0]->dreg;
5459 ins->inst_offset = 0;
5460 MONO_ADD_INS (cfg->cbb, ins);
5462 switch (fsig->params [0]->type) {
5469 ins->dreg = mono_alloc_ireg (cfg);
5470 ins->type = STACK_I4;
5474 ins->dreg = mono_alloc_lreg (cfg);
5475 ins->type = STACK_I8;
5479 ins->dreg = mono_alloc_ireg (cfg);
5480 #if SIZEOF_REGISTER == 8
5481 ins->type = STACK_I8;
5483 ins->type = STACK_I4;
5488 ins->dreg = mono_alloc_freg (cfg);
5489 ins->type = STACK_R8;
5492 g_assert (mini_type_is_reference (fsig->params [0]));
5493 ins->dreg = mono_alloc_ireg_ref (cfg);
5494 ins->type = STACK_OBJ;
5498 if (opcode == OP_LOADI8_MEMBASE)
5499 ins = mono_decompose_opcode (cfg, ins);
5501 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5505 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5507 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5509 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5510 opcode = OP_STOREI1_MEMBASE_REG;
5511 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5512 opcode = OP_STOREI2_MEMBASE_REG;
5513 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5514 opcode = OP_STOREI4_MEMBASE_REG;
5515 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5516 opcode = OP_STOREI8_MEMBASE_REG;
5517 else if (fsig->params [0]->type == MONO_TYPE_R4)
5518 opcode = OP_STORER4_MEMBASE_REG;
5519 else if (fsig->params [0]->type == MONO_TYPE_R8)
5520 opcode = OP_STORER8_MEMBASE_REG;
5521 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5522 opcode = OP_STORE_MEMBASE_REG;
5525 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5527 MONO_INST_NEW (cfg, ins, opcode);
5528 ins->sreg1 = args [1]->dreg;
5529 ins->inst_destbasereg = args [0]->dreg;
5530 ins->inst_offset = 0;
5531 MONO_ADD_INS (cfg->cbb, ins);
5533 if (opcode == OP_STOREI8_MEMBASE_REG)
5534 ins = mono_decompose_opcode (cfg, ins);
5539 } else if (cmethod->klass->image == mono_defaults.corlib &&
5540 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5541 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5544 #if SIZEOF_REGISTER == 8
5545 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5546 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5547 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5548 ins->dreg = mono_alloc_preg (cfg);
5549 ins->sreg1 = args [0]->dreg;
5550 ins->type = STACK_I8;
5551 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5552 MONO_ADD_INS (cfg->cbb, ins);
5556 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5558 /* 64 bit reads are already atomic */
5559 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5560 load_ins->dreg = mono_alloc_preg (cfg);
5561 load_ins->inst_basereg = args [0]->dreg;
5562 load_ins->inst_offset = 0;
5563 load_ins->type = STACK_I8;
5564 MONO_ADD_INS (cfg->cbb, load_ins);
5566 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5573 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5574 MonoInst *ins_iconst;
5577 if (fsig->params [0]->type == MONO_TYPE_I4) {
5578 opcode = OP_ATOMIC_ADD_I4;
5579 cfg->has_atomic_add_i4 = TRUE;
5581 #if SIZEOF_REGISTER == 8
5582 else if (fsig->params [0]->type == MONO_TYPE_I8)
5583 opcode = OP_ATOMIC_ADD_I8;
5586 if (!mono_arch_opcode_supported (opcode))
5588 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5589 ins_iconst->inst_c0 = 1;
5590 ins_iconst->dreg = mono_alloc_ireg (cfg);
5591 MONO_ADD_INS (cfg->cbb, ins_iconst);
5593 MONO_INST_NEW (cfg, ins, opcode);
5594 ins->dreg = mono_alloc_ireg (cfg);
5595 ins->inst_basereg = args [0]->dreg;
5596 ins->inst_offset = 0;
5597 ins->sreg2 = ins_iconst->dreg;
5598 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5599 MONO_ADD_INS (cfg->cbb, ins);
5601 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5602 MonoInst *ins_iconst;
5605 if (fsig->params [0]->type == MONO_TYPE_I4) {
5606 opcode = OP_ATOMIC_ADD_I4;
5607 cfg->has_atomic_add_i4 = TRUE;
5609 #if SIZEOF_REGISTER == 8
5610 else if (fsig->params [0]->type == MONO_TYPE_I8)
5611 opcode = OP_ATOMIC_ADD_I8;
5614 if (!mono_arch_opcode_supported (opcode))
5616 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5617 ins_iconst->inst_c0 = -1;
5618 ins_iconst->dreg = mono_alloc_ireg (cfg);
5619 MONO_ADD_INS (cfg->cbb, ins_iconst);
5621 MONO_INST_NEW (cfg, ins, opcode);
5622 ins->dreg = mono_alloc_ireg (cfg);
5623 ins->inst_basereg = args [0]->dreg;
5624 ins->inst_offset = 0;
5625 ins->sreg2 = ins_iconst->dreg;
5626 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5627 MONO_ADD_INS (cfg->cbb, ins);
5629 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5632 if (fsig->params [0]->type == MONO_TYPE_I4) {
5633 opcode = OP_ATOMIC_ADD_I4;
5634 cfg->has_atomic_add_i4 = TRUE;
5636 #if SIZEOF_REGISTER == 8
5637 else if (fsig->params [0]->type == MONO_TYPE_I8)
5638 opcode = OP_ATOMIC_ADD_I8;
5641 if (!mono_arch_opcode_supported (opcode))
5643 MONO_INST_NEW (cfg, ins, opcode);
5644 ins->dreg = mono_alloc_ireg (cfg);
5645 ins->inst_basereg = args [0]->dreg;
5646 ins->inst_offset = 0;
5647 ins->sreg2 = args [1]->dreg;
5648 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5649 MONO_ADD_INS (cfg->cbb, ins);
5652 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5653 MonoInst *f2i = NULL, *i2f;
5654 guint32 opcode, f2i_opcode, i2f_opcode;
5655 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5656 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5658 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5659 fsig->params [0]->type == MONO_TYPE_R4) {
5660 opcode = OP_ATOMIC_EXCHANGE_I4;
5661 f2i_opcode = OP_MOVE_F_TO_I4;
5662 i2f_opcode = OP_MOVE_I4_TO_F;
5663 cfg->has_atomic_exchange_i4 = TRUE;
5665 #if SIZEOF_REGISTER == 8
5667 fsig->params [0]->type == MONO_TYPE_I8 ||
5668 fsig->params [0]->type == MONO_TYPE_R8 ||
5669 fsig->params [0]->type == MONO_TYPE_I) {
5670 opcode = OP_ATOMIC_EXCHANGE_I8;
5671 f2i_opcode = OP_MOVE_F_TO_I8;
5672 i2f_opcode = OP_MOVE_I8_TO_F;
5675 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5676 opcode = OP_ATOMIC_EXCHANGE_I4;
5677 cfg->has_atomic_exchange_i4 = TRUE;
5683 if (!mono_arch_opcode_supported (opcode))
5687 /* TODO: Decompose these opcodes instead of bailing here. */
5688 if (COMPILE_SOFT_FLOAT (cfg))
5691 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5692 f2i->dreg = mono_alloc_ireg (cfg);
5693 f2i->sreg1 = args [1]->dreg;
5694 if (f2i_opcode == OP_MOVE_F_TO_I4)
5695 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5696 MONO_ADD_INS (cfg->cbb, f2i);
5699 MONO_INST_NEW (cfg, ins, opcode);
5700 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5701 ins->inst_basereg = args [0]->dreg;
5702 ins->inst_offset = 0;
5703 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5704 MONO_ADD_INS (cfg->cbb, ins);
5706 switch (fsig->params [0]->type) {
5708 ins->type = STACK_I4;
5711 ins->type = STACK_I8;
5714 #if SIZEOF_REGISTER == 8
5715 ins->type = STACK_I8;
5717 ins->type = STACK_I4;
5722 ins->type = STACK_R8;
5725 g_assert (mini_type_is_reference (fsig->params [0]));
5726 ins->type = STACK_OBJ;
5731 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5732 i2f->dreg = mono_alloc_freg (cfg);
5733 i2f->sreg1 = ins->dreg;
5734 i2f->type = STACK_R8;
5735 if (i2f_opcode == OP_MOVE_I4_TO_F)
5736 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5737 MONO_ADD_INS (cfg->cbb, i2f);
5742 if (cfg->gen_write_barriers && is_ref)
5743 emit_write_barrier (cfg, args [0], args [1]);
5745 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5746 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5747 guint32 opcode, f2i_opcode, i2f_opcode;
5748 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5749 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5751 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5752 fsig->params [1]->type == MONO_TYPE_R4) {
5753 opcode = OP_ATOMIC_CAS_I4;
5754 f2i_opcode = OP_MOVE_F_TO_I4;
5755 i2f_opcode = OP_MOVE_I4_TO_F;
5756 cfg->has_atomic_cas_i4 = TRUE;
5758 #if SIZEOF_REGISTER == 8
5760 fsig->params [1]->type == MONO_TYPE_I8 ||
5761 fsig->params [1]->type == MONO_TYPE_R8 ||
5762 fsig->params [1]->type == MONO_TYPE_I) {
5763 opcode = OP_ATOMIC_CAS_I8;
5764 f2i_opcode = OP_MOVE_F_TO_I8;
5765 i2f_opcode = OP_MOVE_I8_TO_F;
5768 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5769 opcode = OP_ATOMIC_CAS_I4;
5770 cfg->has_atomic_cas_i4 = TRUE;
5776 if (!mono_arch_opcode_supported (opcode))
5780 /* TODO: Decompose these opcodes instead of bailing here. */
5781 if (COMPILE_SOFT_FLOAT (cfg))
5784 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5785 f2i_new->dreg = mono_alloc_ireg (cfg);
5786 f2i_new->sreg1 = args [1]->dreg;
5787 if (f2i_opcode == OP_MOVE_F_TO_I4)
5788 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5789 MONO_ADD_INS (cfg->cbb, f2i_new);
5791 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5792 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5793 f2i_cmp->sreg1 = args [2]->dreg;
5794 if (f2i_opcode == OP_MOVE_F_TO_I4)
5795 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5796 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5799 MONO_INST_NEW (cfg, ins, opcode);
5800 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5801 ins->sreg1 = args [0]->dreg;
5802 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5803 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5804 MONO_ADD_INS (cfg->cbb, ins);
5806 switch (fsig->params [1]->type) {
5808 ins->type = STACK_I4;
5811 ins->type = STACK_I8;
5814 #if SIZEOF_REGISTER == 8
5815 ins->type = STACK_I8;
5817 ins->type = STACK_I4;
5821 ins->type = cfg->r4_stack_type;
5824 ins->type = STACK_R8;
5827 g_assert (mini_type_is_reference (fsig->params [1]));
5828 ins->type = STACK_OBJ;
5833 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5834 i2f->dreg = mono_alloc_freg (cfg);
5835 i2f->sreg1 = ins->dreg;
5836 i2f->type = STACK_R8;
5837 if (i2f_opcode == OP_MOVE_I4_TO_F)
5838 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5839 MONO_ADD_INS (cfg->cbb, i2f);
5844 if (cfg->gen_write_barriers && is_ref)
5845 emit_write_barrier (cfg, args [0], args [1]);
5847 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5848 fsig->params [1]->type == MONO_TYPE_I4) {
5849 MonoInst *cmp, *ceq;
5851 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5854 /* int32 r = CAS (location, value, comparand); */
5855 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5856 ins->dreg = alloc_ireg (cfg);
5857 ins->sreg1 = args [0]->dreg;
5858 ins->sreg2 = args [1]->dreg;
5859 ins->sreg3 = args [2]->dreg;
5860 ins->type = STACK_I4;
5861 MONO_ADD_INS (cfg->cbb, ins);
5863 /* bool result = r == comparand; */
5864 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5865 cmp->sreg1 = ins->dreg;
5866 cmp->sreg2 = args [2]->dreg;
5867 cmp->type = STACK_I4;
5868 MONO_ADD_INS (cfg->cbb, cmp);
5870 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5871 ceq->dreg = alloc_ireg (cfg);
5872 ceq->type = STACK_I4;
5873 MONO_ADD_INS (cfg->cbb, ceq);
5875 /* *success = result; */
5876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5878 cfg->has_atomic_cas_i4 = TRUE;
5880 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5881 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5885 } else if (cmethod->klass->image == mono_defaults.corlib &&
5886 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5887 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5890 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5892 MonoType *t = fsig->params [0];
5894 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5896 g_assert (t->byref);
5897 /* t is a byref type, so the reference check is more complicated */
5898 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5899 if (t->type == MONO_TYPE_I1)
5900 opcode = OP_ATOMIC_LOAD_I1;
5901 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5902 opcode = OP_ATOMIC_LOAD_U1;
5903 else if (t->type == MONO_TYPE_I2)
5904 opcode = OP_ATOMIC_LOAD_I2;
5905 else if (t->type == MONO_TYPE_U2)
5906 opcode = OP_ATOMIC_LOAD_U2;
5907 else if (t->type == MONO_TYPE_I4)
5908 opcode = OP_ATOMIC_LOAD_I4;
5909 else if (t->type == MONO_TYPE_U4)
5910 opcode = OP_ATOMIC_LOAD_U4;
5911 else if (t->type == MONO_TYPE_R4)
5912 opcode = OP_ATOMIC_LOAD_R4;
5913 else if (t->type == MONO_TYPE_R8)
5914 opcode = OP_ATOMIC_LOAD_R8;
5915 #if SIZEOF_REGISTER == 8
5916 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5917 opcode = OP_ATOMIC_LOAD_I8;
5918 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5919 opcode = OP_ATOMIC_LOAD_U8;
5921 else if (t->type == MONO_TYPE_I)
5922 opcode = OP_ATOMIC_LOAD_I4;
5923 else if (is_ref || t->type == MONO_TYPE_U)
5924 opcode = OP_ATOMIC_LOAD_U4;
5928 if (!mono_arch_opcode_supported (opcode))
5931 MONO_INST_NEW (cfg, ins, opcode);
5932 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5933 ins->sreg1 = args [0]->dreg;
5934 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5935 MONO_ADD_INS (cfg->cbb, ins);
5938 case MONO_TYPE_BOOLEAN:
5945 ins->type = STACK_I4;
5949 ins->type = STACK_I8;
5953 #if SIZEOF_REGISTER == 8
5954 ins->type = STACK_I8;
5956 ins->type = STACK_I4;
5960 ins->type = cfg->r4_stack_type;
5963 ins->type = STACK_R8;
5967 ins->type = STACK_OBJ;
5973 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5975 MonoType *t = fsig->params [0];
5978 g_assert (t->byref);
5979 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5980 if (t->type == MONO_TYPE_I1)
5981 opcode = OP_ATOMIC_STORE_I1;
5982 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5983 opcode = OP_ATOMIC_STORE_U1;
5984 else if (t->type == MONO_TYPE_I2)
5985 opcode = OP_ATOMIC_STORE_I2;
5986 else if (t->type == MONO_TYPE_U2)
5987 opcode = OP_ATOMIC_STORE_U2;
5988 else if (t->type == MONO_TYPE_I4)
5989 opcode = OP_ATOMIC_STORE_I4;
5990 else if (t->type == MONO_TYPE_U4)
5991 opcode = OP_ATOMIC_STORE_U4;
5992 else if (t->type == MONO_TYPE_R4)
5993 opcode = OP_ATOMIC_STORE_R4;
5994 else if (t->type == MONO_TYPE_R8)
5995 opcode = OP_ATOMIC_STORE_R8;
5996 #if SIZEOF_REGISTER == 8
5997 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5998 opcode = OP_ATOMIC_STORE_I8;
5999 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6000 opcode = OP_ATOMIC_STORE_U8;
6002 else if (t->type == MONO_TYPE_I)
6003 opcode = OP_ATOMIC_STORE_I4;
6004 else if (is_ref || t->type == MONO_TYPE_U)
6005 opcode = OP_ATOMIC_STORE_U4;
6009 if (!mono_arch_opcode_supported (opcode))
6012 MONO_INST_NEW (cfg, ins, opcode);
6013 ins->dreg = args [0]->dreg;
6014 ins->sreg1 = args [1]->dreg;
6015 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6016 MONO_ADD_INS (cfg->cbb, ins);
6018 if (cfg->gen_write_barriers && is_ref)
6019 emit_write_barrier (cfg, args [0], args [1]);
6025 } else if (cmethod->klass->image == mono_defaults.corlib &&
6026 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6027 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6028 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6029 if (should_insert_brekpoint (cfg->method)) {
6030 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6032 MONO_INST_NEW (cfg, ins, OP_NOP);
6033 MONO_ADD_INS (cfg->cbb, ins);
6037 } else if (cmethod->klass->image == mono_defaults.corlib &&
6038 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6039 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6040 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6042 EMIT_NEW_ICONST (cfg, ins, 1);
6044 EMIT_NEW_ICONST (cfg, ins, 0);
6047 } else if (cmethod->klass->image == mono_defaults.corlib &&
6048 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6049 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6050 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6051 /* No stack walks are currently available, so implement this as an intrinsic */
6052 MonoInst *assembly_ins;
6054 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6055 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6058 } else if (cmethod->klass->image == mono_defaults.corlib &&
6059 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6060 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6061 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6062 /* No stack walks are currently available, so implement this as an intrinsic */
6063 MonoInst *method_ins;
6064 MonoMethod *declaring = cfg->method;
6066 /* This returns the declaring generic method */
6067 if (declaring->is_inflated)
6068 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6069 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6070 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6071 cfg->no_inline = TRUE;
6072 if (cfg->method != cfg->current_method)
6073 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6076 } else if (cmethod->klass == mono_defaults.math_class) {
6078 * There is general branchless code for Min/Max, but it does not work for
6080 * http://everything2.com/?node_id=1051618
6082 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6083 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6084 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6085 ins->dreg = alloc_preg (cfg);
6086 ins->type = STACK_I4;
6087 MONO_ADD_INS (cfg->cbb, ins);
6089 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6090 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6091 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6092 !strcmp (cmethod->klass->name, "Selector")) ||
6093 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6094 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6095 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6096 !strcmp (cmethod->klass->name, "Selector"))
6098 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6099 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6100 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6103 MonoJumpInfoToken *ji;
6106 if (args [0]->opcode == OP_GOT_ENTRY) {
6107 pi = (MonoInst *)args [0]->inst_p1;
6108 g_assert (pi->opcode == OP_PATCH_INFO);
6109 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6110 ji = (MonoJumpInfoToken *)pi->inst_p0;
6112 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6113 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6116 NULLIFY_INS (args [0]);
6118 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6119 return_val_if_nok (&cfg->error, NULL);
6121 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6122 ins->dreg = mono_alloc_ireg (cfg);
6125 MONO_ADD_INS (cfg->cbb, ins);
6130 #ifdef MONO_ARCH_SIMD_INTRINSICS
6131 if (cfg->opt & MONO_OPT_SIMD) {
6132 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6138 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6142 if (COMPILE_LLVM (cfg)) {
6143 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6148 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6152 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect a few well-known runtime method calls to faster sequences.
 *   Currently: String.InternalAllocateStr is redirected to a managed GC
 *   allocator when allocation profiling and shared (AOT-shared) code are
 *   both disabled. Returns the call instruction, or falls through when no
 *   redirection applies (tail of function elided from this view).
 */
6155 inline static MonoInst*
6156 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6157 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6159 if (method->klass == mono_defaults.string_class) {
6160 /* managed string allocation support */
6161 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6162 MonoInst *iargs [2];
6163 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6164 MonoMethod *managed_alloc = NULL;
6166 g_assert (vtable); /* Should not fail since it is System.String */
/* The managed allocator is unavailable when cross-compiling. */
6167 #ifndef MONO_CROSS_COMPILE
6168 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6172 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6173 iargs [1] = args [0];
6174 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   For inlining: create an OP_LOCAL variable for each argument (including
 *   an implicit 'this') of SIG and store the corresponding stack value SP [i]
 *   into it, so the inlined body sees real argument vars in cfg->args.
 */
6181 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6183 MonoInst *store, *temp;
6186 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The 'this' argument's type is taken from the stack entry, not the signature. */
6187 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6190 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6191 * would be different than the MonoInst's used to represent arguments, and
6192 * the ldelema implementation can't deal with that.
6193 * Solution: When ldelema is used on an inline argument, create a var for
6194 * it, emit ldelema on that var, and emit the saving code below in
6195 * inline_method () if needed.
6197 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6198 cfg->args [i] = temp;
6199 /* This uses cfg->args [i] which is set by the preceding line */
6200 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6201 store->cil_code = sp [0]->cil_code;
6206 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6207 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6209 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug aid: only allow inlining of callees whose full name starts with the
 *   prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 *   The env var is read once and cached in a static. Returns TRUE when the
 *   callee matches the prefix (or when no limit is set — elided branch).
 */
6211 check_inline_called_method_name_limit (MonoMethod *called_method)
6214 static const char *limit = NULL;
6216 if (limit == NULL) {
6217 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6219 if (limit_string != NULL)
6220 limit = limit_string;
6225 if (limit [0] != '\0') {
6226 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
6228 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6229 g_free (called_method_name);
6231 //return (strncmp_result <= 0);
6232 return (strncmp_result == 0);
6239 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug aid: only allow inlining inside callers whose full name starts with
 *   the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Mirrors
 *   check_inline_called_method_name_limit () but keys on the caller.
 */
6241 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6244 static const char *limit = NULL;
6246 if (limit == NULL) {
6247 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6248 if (limit_string != NULL) {
6249 limit = limit_string;
6255 if (limit [0] != '\0') {
6256 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit string. */
6258 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6259 g_free (caller_method_name);
6261 //return (strncmp_result <= 0);
6262 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes vreg DREG to the zero value appropriate for
 *   RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats,
 *   and VZERO for value types. The float constants live in static storage
 *   because OP_R4CONST/OP_R8CONST take a pointer to the value (inst_p0).
 */
6270 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6272 static double r8_0 = 0.0;
6273 static float r4_0 = 0.0;
/* Strip custom modifiers / enum wrappers before switching on the type. */
6277 rtype = mini_get_underlying_type (rtype);
6281 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6282 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6283 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6284 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6285 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 values stay in single precision instead of widening to R8. */
6286 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6287 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6288 ins->type = STACK_R4;
6289 ins->inst_p0 = (void*)&r4_0;
6291 MONO_ADD_INS (cfg->cbb, ins);
6292 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6293 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6294 ins->type = STACK_R8;
6295 ins->inst_p0 = (void*)&r8_0;
6297 MONO_ADD_INS (cfg->cbb, ins);
6298 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6299 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6300 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type vars known to be value types are also zeroed with VZERO. */
6301 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6302 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6304 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations,
 *   which keep the IR/SSA well-formed without generating real code. Used for
 *   locals that don't need actual zeroing (see emit_init_local). Falls back
 *   to emit_init_rvar () for types with no dummy opcode.
 */
6309 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6313 rtype = mini_get_underlying_type (rtype);
6317 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6318 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6319 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6320 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6321 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6322 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6323 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6324 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6325 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6326 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6327 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6328 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6329 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6330 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy form for this type — emit a real initialization. */
6332 emit_init_rvar (cfg, dreg, rtype);
6336 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE. When INIT is TRUE a real zero
 *   initialization is emitted; otherwise only dummy inits keep the IR valid.
 *   Soft-float targets go through a fresh dreg plus an explicit LOCSTORE so
 *   the store is decomposed correctly.
 */
6338 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6340 MonoInst *var = cfg->locals [local];
6341 if (COMPILE_SOFT_FLOAT (cfg)) {
6343 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6344 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6345 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6348 emit_init_rvar (cfg, var->dreg, type);
6350 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below; same contract. */
6355 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6357 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6363 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *   Try to inline CMETHOD at the current IR position. Saves the relevant
 *   cfg state, compiles the callee's IL into fresh bblocks via
 *   mono_method_to_ir (), restores the state, and either stitches the new
 *   blocks into the caller (returning the positive inline cost) or discards
 *   them on abort. INLINE_ALWAYS forces inlining regardless of cost and
 *   turns header-loading failures into compile errors instead of bail-outs.
 */
6366 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6367 guchar *ip, guint real_offset, gboolean inline_always)
6370 MonoInst *ins, *rvar = NULL;
6371 MonoMethodHeader *cheader;
6372 MonoBasicBlock *ebblock, *sbblock;
6374 MonoMethod *prev_inlined_method;
6375 MonoInst **prev_locals, **prev_args;
6376 MonoType **prev_arg_types;
6377 guint prev_real_offset;
6378 GHashTable *prev_cbb_hash;
6379 MonoBasicBlock **prev_cil_offset_to_bb;
6380 MonoBasicBlock *prev_cbb;
6381 const unsigned char *prev_ip;
6382 unsigned char *prev_cil_start;
6383 guint32 prev_cil_offset_to_bb_len;
6384 MonoMethod *prev_current_method;
6385 MonoGenericContext *prev_generic_context;
6386 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6388 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters for debugging inliner issues. */
6390 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6391 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6394 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6395 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6400 fsig = mono_method_signature (cmethod);
6402 if (cfg->verbose_level > 2)
6403 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6405 if (!cmethod->inline_info) {
6406 cfg->stat_inlineable_methods++;
6407 cmethod->inline_info = 1;
6410 /* allocate local variables */
6411 cheader = mono_method_get_header_checked (cmethod, &error);
/* On header failure: forced inlines propagate the error, others clean up. */
6413 if (inline_always) {
6414 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6415 mono_error_move (&cfg->error, &error);
6417 mono_error_cleanup (&error);
6422 /*Must verify before creating locals as it can cause the JIT to assert.*/
6423 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6424 mono_metadata_free_mh (cheader);
6428 /* allocate space to store the return value */
6429 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6430 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's array is restored below. */
6433 prev_locals = cfg->locals;
6434 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6435 for (i = 0; i < cheader->num_locals; ++i)
6436 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6438 /* allocate start and end blocks */
6439 /* This is needed so if the inline is aborted, we can clean up */
6440 NEW_BBLOCK (cfg, sbblock);
6441 sbblock->real_offset = real_offset;
6443 NEW_BBLOCK (cfg, ebblock);
6444 ebblock->block_num = cfg->num_bblocks++;
6445 ebblock->real_offset = real_offset;
/* Save all per-method cfg state that mono_method_to_ir () will clobber. */
6447 prev_args = cfg->args;
6448 prev_arg_types = cfg->arg_types;
6449 prev_inlined_method = cfg->inlined_method;
6450 cfg->inlined_method = cmethod;
6451 cfg->ret_var_set = FALSE;
6452 cfg->inline_depth ++;
6453 prev_real_offset = cfg->real_offset;
6454 prev_cbb_hash = cfg->cbb_hash;
6455 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6456 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6457 prev_cil_start = cfg->cil_start;
6459 prev_cbb = cfg->cbb;
6460 prev_current_method = cfg->current_method;
6461 prev_generic_context = cfg->generic_context;
6462 prev_ret_var_set = cfg->ret_var_set;
6463 prev_disable_inline = cfg->disable_inline;
/* A callvirt on an instance method keeps its null check when inlined. */
6465 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6468 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6470 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state regardless of success. */
6472 cfg->inlined_method = prev_inlined_method;
6473 cfg->real_offset = prev_real_offset;
6474 cfg->cbb_hash = prev_cbb_hash;
6475 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6476 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6477 cfg->cil_start = prev_cil_start;
6479 cfg->locals = prev_locals;
6480 cfg->args = prev_args;
6481 cfg->arg_types = prev_arg_types;
6482 cfg->current_method = prev_current_method;
6483 cfg->generic_context = prev_generic_context;
6484 cfg->ret_var_set = prev_ret_var_set;
6485 cfg->disable_inline = prev_disable_inline;
6486 cfg->inline_depth --;
/* Accept: cheap enough, forced, or marked AggressiveInlining. */
6488 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6489 if (cfg->verbose_level > 2)
6490 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6492 cfg->stat_inlined_methods++;
6494 /* always add some code to avoid block split failures */
6495 MONO_INST_NEW (cfg, ins, OP_NOP);
6496 MONO_ADD_INS (prev_cbb, ins);
6498 prev_cbb->next_bb = sbblock;
6499 link_bblock (cfg, prev_cbb, sbblock);
6502 * Get rid of the begin and end bblocks if possible to aid local
6505 if (prev_cbb->out_count == 1)
6506 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6508 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6509 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6511 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6512 MonoBasicBlock *prev = ebblock->in_bb [0];
6514 if (prev->next_bb == ebblock) {
6515 mono_merge_basic_blocks (cfg, prev, ebblock);
6517 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6518 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6519 cfg->cbb = prev_cbb;
6522 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6527 * It's possible that the rvar is set in some prev bblock, but not in others.
6533 for (i = 0; i < ebblock->in_count; ++i) {
6534 bb = ebblock->in_bb [i];
6536 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6539 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6549 * If the inlined method contains only a throw, then the ret var is not
6550 * set, so set it to a dummy value.
6553 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6555 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6558 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: drop the new bblocks and clear the pending exception. */
6561 if (cfg->verbose_level > 2)
6562 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6563 cfg->exception_type = MONO_EXCEPTION_NONE;
6565 /* This gets rid of the newly added bblocks */
6566 cfg->cbb = prev_cbb;
6568 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6573 * Some of these comments may well be out-of-date.
6574 * Design decisions: we do a single pass over the IL code (and we do bblock
6575 * splitting/merging in the few cases when it's required: a back jump to an IL
6576 * address that was not already seen as bblock starting point).
6577 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6578 * Complex operations are decomposed in simpler ones right away. We need to let the
6579 * arch-specific code peek and poke inside this process somehow (except when the
6580 * optimizations can take advantage of the full semantic info of coarse opcodes).
6581 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6582 * MonoInst->opcode initially is the IL opcode or some simplification of that
6583 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6584 * opcode with value bigger than OP_LAST.
6585 * At this point the IR can be handed over to an interpreter, a dumb code generator
6586 * or to the optimizing code generator that will translate it to SSA form.
6588 * Profiling directed optimizations.
6589 * We may compile by default with few or no optimizations and instrument the code
6590 * or the user may indicate what methods to optimize the most either in a config file
6591 * or through repeated runs where the compiler applies offline the optimizations to
6592 * each method and then decides if it was worth it.
6595 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6596 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6597 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6598 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6599 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6600 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6601 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6602 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6604 /* offset from br.s -> br like opcodes */
6605 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when the IL address IP falls inside bblock BB, i.e. the
 *   offset either maps to BB itself or starts no new bblock at all.
 */
6608 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6610 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6612 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [START, END): decode each opcode's immediate
 *   size, and create a bblock (GET_BBLOCK) at every branch target, at every
 *   instruction following a branch/switch, and mark throw-only bblocks as
 *   out-of-line so they can be moved to cold code.
 */
6616 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6618 unsigned char *ip = start;
6619 unsigned char *target;
6622 MonoBasicBlock *bblock;
6623 const MonoOpcode *opcode;
6626 cli_addr = ip - start;
6627 i = mono_opcode_value ((const guint8 **)&ip, end);
6630 opcode = &mono_opcodes [i];
/* Advance IP by the size of the operand; branches also spawn bblocks. */
6631 switch (opcode->argument) {
6632 case MonoInlineNone:
6635 case MonoInlineString:
6636 case MonoInlineType:
6637 case MonoInlineField:
6638 case MonoInlineMethod:
6641 case MonoShortInlineR:
6648 case MonoShortInlineVar:
6649 case MonoShortInlineI:
6652 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the instruction. */
6653 target = start + cli_addr + 2 + (signed char)ip [1];
6654 GET_BBLOCK (cfg, bblock, target);
6657 GET_BBLOCK (cfg, bblock, ip);
6659 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the instruction. */
6660 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6661 GET_BBLOCK (cfg, bblock, target);
6664 GET_BBLOCK (cfg, bblock, ip);
6666 case MonoInlineSwitch: {
6667 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table. */
6670 cli_addr += 5 + 4 * n;
6671 target = start + cli_addr;
6672 GET_BBLOCK (cfg, bblock, target);
6674 for (j = 0; j < n; ++j) {
6675 target = start + cli_addr + (gint32)read32 (ip);
6676 GET_BBLOCK (cfg, bblock, target);
6686 g_assert_not_reached ();
6689 if (i == CEE_THROW) {
6690 unsigned char *bb_start = ip - 1;
6692 /* Find the start of the bblock containing the throw */
6694 while ((bb_start >= start) && !bblock) {
6695 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-only blocks are cold: move them out of the main code path. */
6699 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods
 *   the method comes from the wrapper data (then inflated against CONTEXT);
 *   otherwise it is loaded from metadata. May return a method with open
 *   (unbound) generic type arguments — callers must handle that.
 */
6709 static inline MonoMethod *
6710 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6716 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6717 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6719 method = mono_class_inflate_generic_method_checked (method, context, error);
6722 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open () but rejects methods on open
 *   constructed types when not compiling gshared code, recording a
 *   bad-image error in CFG. Errors go to cfg->error when CFG is non-NULL.
 */
6728 static inline MonoMethod *
6729 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6732 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6734 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6735 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6739 if (!method && !cfg)
6740 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD, taking the class
 *   from wrapper data for wrapper methods, inflating against CONTEXT, and
 *   initializing the class before returning it.
 */
6745 static inline MonoClass*
6746 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6751 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6752 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6754 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6755 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6758 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6759 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6762 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature: from wrapper data for wrappers,
 *   otherwise parsed from metadata and inflated against CONTEXT. Returns
 *   NULL on parse failure with the failure recorded in ERROR.
 */
6766 static inline MonoMethodSignature*
6767 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6769 MonoMethodSignature *fsig;
6772 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6773 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6775 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6776 return_val_if_nok (error, NULL);
6779 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return the SecurityManager::ThrowException(1 arg) method, cached in a
 * function-local static after the first lookup.
 * (NOTE(review): some lines are elided in this excerpt.)
 */
static MonoMethod*
6785 throw_exception (void)
6787 static MonoMethod *method = NULL;
6790 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6791 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR which calls SecurityManager::ThrowException () with the
 * pre-built exception object EX as its single argument.
 */
static void
6798 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6800 MonoMethod *thrower = throw_exception ();
6803 EMIT_NEW_PCONST (cfg, args [0], ex);
6804 mono_emit_method_call (cfg, thrower, args, NULL);
6808 * Return the original method if a wrapper is specified. We can only access
6809 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper method back to the managed method it wraps, since custom
 * attributes (used by the CoreCLR security checks below) only exist on the
 * original method. Non-wrappers are returned as-is.
 * (NOTE(review): some lines are elided in this excerpt.)
 */
static MonoMethod*
6812 get_original_method (MonoMethod *method)
6814 if (method->wrapper_type == MONO_WRAPPER_NONE)
/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6818 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
/* in other cases we need to find the original method */
6822 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit IR which
 * throws the resulting security exception at run time.
 */
static void
6826 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6828 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6829 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6831 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit IR which
 * throws the resulting security exception at run time.
 */
static void
6835 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6837 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6838 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6840 emit_throw_exception (cfg, ex);
6844 * Check that the IL instructions at ip are the array initialization
6845 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence emitted for array initializers
 * (dup; ldtoken <field>; call RuntimeHelpers::InitializeArray) starting at
 * IP, and if it matches return a pointer to the static field data so the
 * caller can fold the initialization into a direct memory copy. OUT_SIZE
 * and OUT_FIELD_TOKEN receive the data size and the field token. For AOT
 * compilation the field RVA is returned instead, since the data is mapped
 * at load time. Returns NULL when the pattern does not match.
 * (NOTE(review): some lines are elided in this excerpt.)
 */
6848 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6851 * newarr[System.Int32]
6853 * ldtoken field valuetype ...
6854 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6856 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
/* Operands: call token at ip+7, ldtoken field token at ip+2 */
6858 guint32 token = read32 (ip + 7);
6859 guint32 field_token = read32 (ip + 2);
6860 guint32 field_index = field_token & 0xffffff;
6862 const char *data_ptr;
6864 MonoMethod *cmethod;
6865 MonoClass *dummy_class;
6866 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6870 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6874 *out_field_token = field_token;
6876 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only corlib's RuntimeHelpers.InitializeArray qualifies for this optimization */
6879 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6881 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6882 case MONO_TYPE_BOOLEAN:
6886 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6887 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6888 case MONO_TYPE_CHAR:
/* Sanity check: the computed data size must fit in the field */
6905 if (size > mono_type_size (field->type, &dummy_align))
6908 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6909 if (!image_is_dynamic (method->klass->image)) {
6910 field_index = read32 (ip + 2) & 0xffffff;
6911 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6912 data_ptr = mono_image_rva_map (method->klass->image, rva);
6913 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6914 /* for aot code we do the lookup on load */
6915 if (aot && data_ptr)
6916 return (const char *)GUINT_TO_POINTER (rva);
6918 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: read the data straight from the field */
6920 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP inside METHOD, including a disassembly of the offending instruction
 * when the method header is parseable. The header is queued on
 * cfg->headers_to_free for later release.
 */
static void
6928 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6931 char *method_fname = mono_method_full_name (method, TRUE);
6933 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6936 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6937 mono_error_cleanup (&error);
6938 } else if (header->code_size == 0)
6939 method_code = g_strdup ("method body is empty.");
6941 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6942 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6943 g_free (method_fname);
6944 g_free (method_code);
6945 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local variable N. When the value on the
 * stack is a constant that was the last instruction emitted, the move is
 * optimized away by retargeting the constant's destination register.
 */
static void
6949 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6952 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6953 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6954 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6955 /* Optimize reg-reg moves away */
/*
 * Can't optimize other opcodes, since sp[0] might point to
 * the last ins of a decomposed opcode.
 */
6960 sp [0]->dreg = (cfg)->locals [n]->dreg;
6962 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations so try to get rid of it in common
 * cases: when the ldloca is immediately followed by an initobj, emit the
 * local's initialization directly and return the IP past the consumed
 * instructions. (NOTE(review): some lines are elided in this excerpt.)
 */
6970 static inline unsigned char *
6971 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6981 local = read16 (ip + 2);
/* Look ahead for "initobj <token>" in the same basic block */
6985 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6986 /* From the INITOBJ case */
6987 token = read32 (ip + 2);
6988 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6989 CHECK_TYPELOAD (klass);
6990 type = mini_get_underlying_type (&klass->byval_arg);
6991 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode,
 * where vtable and IMT slots hold function descriptors (address + arg
 * pairs) instead of code addresses/trampolines. Four strategies are used,
 * from fastest to slowest:
 *   1. plain virtual call       - load the ftndesc from the vtable slot,
 *                                 initializing the slot via icall on first use;
 *   2. simple interface call    - call through the IMT thunk in the imt slot;
 *   3. generic virtual/variant  - like (2) but with a slowpath for
 *                                 instantiations the thunk does not know yet;
 *   4. everything else          - resolve the target via a runtime icall
 *                                 (gsharedvt only).
 * (NOTE(review): some lines are elided in this excerpt.)
 */
6999 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7001 MonoInst *icall_args [16];
7002 MonoInst *call_target, *ins, *vtable_ins;
7003 int arg_reg, this_reg, vtable_reg;
7004 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7005 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7006 gboolean variant_iface = FALSE;
7009 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
/*
 * In llvm-only mode, vtables contain function descriptors instead of
 * method addresses/trampolines.
 */
7015 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots, classes use vtable indexes */
7018 slot = mono_method_get_imt_slot (cmethod);
7020 slot = mono_method_get_vtable_index (cmethod);
7022 this_reg = sp [0]->dreg;
7024 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7025 variant_iface = TRUE;
7027 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
/*
 * The simplest case, a normal virtual call.
 */
7031 int slot_reg = alloc_preg (cfg);
7032 int addr_reg = alloc_preg (cfg);
7033 int arg_reg = alloc_preg (cfg);
7034 MonoBasicBlock *non_null_bb;
7036 vtable_reg = alloc_preg (cfg);
7037 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7038 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7040 /* Load the vtable slot, which contains a function descriptor. */
7041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7043 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (expected common case) */
7045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7046 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7047 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7050 // FIXME: Make the wrapper use the preserveall cconv
7051 // FIXME: Use one icall per slot for small slot numbers ?
7052 icall_args [0] = vtable_ins;
7053 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7054 /* Make the icall return the vtable slot value to save some code space */
7055 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7056 ins->dreg = slot_reg;
7057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7060 MONO_START_BB (cfg, non_null_bb);
7061 /* Load the address + arg from the vtable slot */
7062 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7065 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7068 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
/*
 * A simple interface call
 *
 * We make a call through an imt slot to obtain the function descriptor we need to call.
 * The imt slot contains a function descriptor for a runtime function + arg.
 */
7075 int slot_reg = alloc_preg (cfg);
7076 int addr_reg = alloc_preg (cfg);
7077 int arg_reg = alloc_preg (cfg);
7078 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7080 vtable_reg = alloc_preg (cfg);
7081 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable */
7082 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/*
 * The slot is already initialized when the vtable is created so there is no need
 * for a null check here.
 */
7089 /* Load the imt slot, which contains a function descriptor. */
7090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7092 /* Load the address + arg of the imt thunk from the imt slot */
7093 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7094 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
/*
 * IMT thunks in llvm-only mode are C functions which take an info argument
 * plus the imt method and return the ftndesc to call.
 */
7099 icall_args [0] = thunk_arg_ins;
7100 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7101 cmethod, MONO_RGCTX_INFO_METHOD);
7102 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7104 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7107 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
/*
 * This is similar to the interface case, the vtable slot points to an imt thunk which is
 * dynamically extended as more instantiations are discovered.
 * This handles generic virtual methods both on classes and interfaces.
 */
7113 int slot_reg = alloc_preg (cfg);
7114 int addr_reg = alloc_preg (cfg);
7115 int arg_reg = alloc_preg (cfg);
7116 int ftndesc_reg = alloc_preg (cfg);
7117 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7118 MonoBasicBlock *slowpath_bb, *end_bb;
7120 NEW_BBLOCK (cfg, slowpath_bb);
7121 NEW_BBLOCK (cfg, end_bb);
7123 vtable_reg = alloc_preg (cfg);
7124 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface slot (negative IMT offset) vs. class vtable slot */
7126 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7128 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7130 /* Load the slot, which contains a function descriptor. */
7131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7133 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7134 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7135 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7136 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7139 /* Same as with iface calls */
7140 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7141 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7142 icall_args [0] = thunk_arg_ins;
7143 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7144 cmethod, MONO_RGCTX_INFO_METHOD);
7145 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7146 ftndesc_ins->dreg = ftndesc_reg;
/*
 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
 * they don't know about yet. Fall back to the slowpath in that case.
 */
7151 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7152 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: resolve the target through a runtime icall */
7157 MONO_START_BB (cfg, slowpath_bb);
7158 icall_args [0] = vtable_ins;
7159 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7160 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7161 cmethod, MONO_RGCTX_INFO_METHOD);
7163 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7165 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7166 ftndesc_ins->dreg = ftndesc_reg;
7167 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7170 MONO_START_BB (cfg, end_bb);
7171 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/*
 * Non-optimized cases
 */
7177 icall_args [0] = sp [0];
7178 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7180 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7181 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-param: the resolver icall fills in the extra call argument */
7183 arg_reg = alloc_preg (cfg);
7184 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7185 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7187 g_assert (is_gsharedvt);
7189 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7191 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
/*
 * Pass the extra argument even if the callee doesn't receive it, most
 * calling conventions allow this.
 */
7197 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain. (NOTE(review): loop structure partly elided in this excerpt.)
 */
static gboolean
7201 is_exception_class (MonoClass *klass)
7204 if (klass == mono_defaults.exception_class)
7206 klass = klass->parent;
7212 * is_jit_optimizer_disabled:
7214 * Determine whether M's assembly has a DebuggableAttribute with the
7215 * IsJITOptimizerDisabled flag set.
/*
 * Check M's assembly for [Debuggable (..., isJITOptimizerDisabled: ...)].
 * The result is cached on the assembly; the memory barriers order the
 * cached value before the "inited" flag for lock-free readers.
 * (NOTE(review): some lines are elided in this excerpt.)
 */
static gboolean
7218 is_jit_optimizer_disabled (MonoMethod *m)
7221 MonoAssembly *ass = m->klass->image->assembly;
7222 MonoCustomAttrInfo* attrs;
7225 gboolean val = FALSE;
/* Fast path: already computed and published for this assembly */
7228 if (ass->jit_optimizer_disabled_inited)
7229 return ass->jit_optimizer_disabled;
7231 klass = mono_class_try_get_debuggable_attribute_class ();
/* Attribute type not present: optimizer cannot be disabled */
7235 ass->jit_optimizer_disabled = FALSE;
7236 mono_memory_barrier ();
7237 ass->jit_optimizer_disabled_inited = TRUE;
7241 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7242 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7244 for (i = 0; i < attrs->num_attrs; ++i) {
7245 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7247 MonoMethodSignature *sig;
7249 if (!attr->ctor || attr->ctor->klass != klass)
7251 /* Decode the attribute. See reflection.c */
7252 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7253 g_assert (read16 (p) == 0x0001);
7256 // FIXME: Support named parameters
7257 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here */
7258 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7260 /* Two boolean arguments */
7264 mono_custom_attrs_free (attrs);
7267 ass->jit_optimizer_disabled = val;
7268 mono_memory_barrier ();
7269 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call: the architecture must support the signature
 * pair, and none of the disqualifying conditions below may hold (arguments
 * or 'this' possibly pointing into the caller's stack frame, pinvokes,
 * save_lmf methods, most wrappers, or a non-CEE_CALL opcode).
 */
static gboolean
7275 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7277 gboolean supported_tail_call;
7280 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7282 for (i = 0; i < fsig->param_count; ++i) {
7283 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7284 /* These can point to the current method's stack */
7285 supported_tail_call = FALSE;
7287 if (fsig->hasthis && cmethod->klass->valuetype)
7288 /* this might point to the current method's stack */
7289 supported_tail_call = FALSE;
7290 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7291 supported_tail_call = FALSE;
7292 if (cfg->method->save_lmf)
7293 supported_tail_call = FALSE;
7294 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7295 supported_tail_call = FALSE;
7296 if (call_opcode != CEE_CALL)
7297 supported_tail_call = FALSE;
7299 /* Debugging support */
7301 if (supported_tail_call) {
/* mono_debug_count () lets tail calls be disabled selectively for bisecting */
7302 if (!mono_debug_count ())
7303 supported_tail_call = FALSE;
7307 return supported_tail_call;
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes. SP holds the call
 * arguments ('this' first); INLINE_COSTS is updated when the ctor is
 * inlined. Depending on the compilation mode the ctor is inlined, replaced
 * by an intrinsic, called indirectly (gshared/gsharedvt), or called
 * directly. (NOTE(review): some lines are elided in this excerpt.)
 */
static void
7316 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7317 MonoInst **sp, guint8 *ip, int *inline_costs)
7319 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an rgctx/vtable argument */
7321 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7322 mono_method_is_generic_sharable (cmethod, TRUE)) {
7323 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7324 mono_class_vtable (cfg->domain, cmethod->klass);
7325 CHECK_TYPELOAD (cmethod->klass);
7327 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7328 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7331 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7332 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7334 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7336 CHECK_TYPELOAD (cmethod->klass);
7337 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7342 /* Avoid virtual calls to ctors if possible */
7343 if (mono_class_is_marshalbyref (cmethod->klass))
7344 callvirt_this_arg = sp [0];
/* 1) intrinsic replacement */
7346 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7347 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7348 CHECK_CFG_EXCEPTION;
/* 2) inline the ctor body (not for exception subclasses) */
7349 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7350 mono_method_check_inlining (cfg, cmethod) &&
7351 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7354 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7355 cfg->real_offset += 5;
7357 *inline_costs += costs - 5;
7359 INLINE_FAILURE ("inline failure");
7360 // FIXME-VT: Clean this up
7361 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7362 GSHAREDVT_FAILURE(*ip);
7363 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt: call through the out trampoline */
7365 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7368 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7370 if (cfg->llvm_only) {
7371 // FIXME: Avoid initializing vtable_arg
7372 emit_llvmonly_calli (cfg, fsig, sp, addr);
7374 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) shared-context ctor which cannot be called directly */
7376 } else if (context_used &&
7377 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7378 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7379 MonoInst *cmethod_addr;
7381 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7383 if (cfg->llvm_only) {
7384 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7385 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7386 emit_llvmonly_calli (cfg, fsig, sp, addr);
7388 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7389 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7391 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) plain direct call */
7394 INLINE_FAILURE ("ctor call");
7395 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7396 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR which stores VAL as the current method's return value.
 * Valuetype returns (CEE_STOBJ) go either into the dedicated return
 * variable or through the hidden vret address; soft-float targets convert
 * R4 returns via an icall; everything else uses the arch-specific setret.
 * (NOTE(review): some lines are elided in this excerpt.)
 */
static void
7403 emit_setret (MonoCompile *cfg, MonoInst *val)
7405 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7408 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7411 if (!cfg->vret_addr) {
7412 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden address to store the valuetype result into */
7414 EMIT_NEW_RETLOADA (cfg, ret_addr);
7416 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7417 ins->klass = mono_class_from_mono_type (ret_type);
7420 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7421 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7422 MonoInst *iargs [1];
7426 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7427 mono_arch_emit_setret (cfg, cfg->method, conv);
7429 mono_arch_emit_setret (cfg, cfg->method, val);
7432 mono_arch_emit_setret (cfg, cfg->method, val);
7438 * mono_method_to_ir:
7440 * Translate the .net IL into linear IR.
7442 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7443 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7444 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7445 * @inline_args: if not NULL, contains the arguments to the inline call
7446 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7447 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7449 * This method is used to turn ECMA IL into Mono's internal Linear IR
7450 * representation. It is used both for entire methods, as well as
7451 * inlining existing methods. In the former case, the @start_bblock,
7452 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7453 * inline_offset is set to zero.
7455 * Returns: the inline cost, or -1 if there was an error processing this method.
7458 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7459 MonoInst *return_var, MonoInst **inline_args,
7460 guint inline_offset, gboolean is_virtual_call)
7463 MonoInst *ins, **sp, **stack_start;
7464 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7465 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7466 MonoMethod *cmethod, *method_definition;
7467 MonoInst **arg_array;
7468 MonoMethodHeader *header;
7470 guint32 token, ins_flag;
7472 MonoClass *constrained_class = NULL;
7473 unsigned char *ip, *end, *target, *err_pos;
7474 MonoMethodSignature *sig;
7475 MonoGenericContext *generic_context = NULL;
7476 MonoGenericContainer *generic_container = NULL;
7477 MonoType **param_types;
7478 int i, n, start_new_bblock, dreg;
7479 int num_calls = 0, inline_costs = 0;
7480 int breakpoint_id = 0;
7482 GSList *class_inits = NULL;
7483 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7485 gboolean init_locals, seq_points, skip_dead_blocks;
7486 gboolean sym_seq_points = FALSE;
7487 MonoDebugMethodInfo *minfo;
7488 MonoBitSet *seq_point_locs = NULL;
7489 MonoBitSet *seq_point_set_locs = NULL;
7491 cfg->disable_inline = is_jit_optimizer_disabled (method);
7493 /* serialization and xdomain stuff may need access to private fields and methods */
7494 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7495 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7496 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7497 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7498 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7499 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7501 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7502 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7503 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7504 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7505 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7507 image = method->klass->image;
7508 header = mono_method_get_header_checked (method, &cfg->error);
7510 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7511 goto exception_exit;
7513 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7516 generic_container = mono_method_get_generic_container (method);
7517 sig = mono_method_signature (method);
7518 num_args = sig->hasthis + sig->param_count;
7519 ip = (unsigned char*)header->code;
7520 cfg->cil_start = ip;
7521 end = ip + header->code_size;
7522 cfg->stat_cil_code_size += header->code_size;
7524 seq_points = cfg->gen_seq_points && cfg->method == method;
7526 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7527 /* We could hit a seq point before attaching to the JIT (#8338) */
7531 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7532 minfo = mono_debug_lookup_method (method);
7534 MonoSymSeqPoint *sps;
7535 int i, n_il_offsets;
7537 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7538 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7539 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7540 sym_seq_points = TRUE;
7541 for (i = 0; i < n_il_offsets; ++i) {
7542 if (sps [i].il_offset < header->code_size)
7543 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7547 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7549 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7551 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7552 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7554 mono_debug_free_method_async_debug_info (asyncMethod);
7556 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7557 /* Methods without line number info like auto-generated property accessors */
7558 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7559 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7560 sym_seq_points = TRUE;
7565 * Methods without init_locals set could cause asserts in various passes
7566 * (#497220). To work around this, we emit dummy initialization opcodes
7567 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7568 * on some platforms.
7570 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7571 init_locals = header->init_locals;
7575 method_definition = method;
7576 while (method_definition->is_inflated) {
7577 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7578 method_definition = imethod->declaring;
7581 /* SkipVerification is not allowed if core-clr is enabled */
7582 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7584 dont_verify_stloc = TRUE;
7587 if (sig->is_inflated)
7588 generic_context = mono_method_get_context (method);
7589 else if (generic_container)
7590 generic_context = &generic_container->context;
7591 cfg->generic_context = generic_context;
7594 g_assert (!sig->has_type_parameters);
7596 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7597 g_assert (method->is_inflated);
7598 g_assert (mono_method_get_context (method)->method_inst);
7600 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7601 g_assert (sig->generic_param_count);
7603 if (cfg->method == method) {
7604 cfg->real_offset = 0;
7606 cfg->real_offset = inline_offset;
7609 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7610 cfg->cil_offset_to_bb_len = header->code_size;
7612 cfg->current_method = method;
7614 if (cfg->verbose_level > 2)
7615 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7617 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7619 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7620 for (n = 0; n < sig->param_count; ++n)
7621 param_types [n + sig->hasthis] = sig->params [n];
7622 cfg->arg_types = param_types;
7624 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7625 if (cfg->method == method) {
7627 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7628 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7631 NEW_BBLOCK (cfg, start_bblock);
7632 cfg->bb_entry = start_bblock;
7633 start_bblock->cil_code = NULL;
7634 start_bblock->cil_length = 0;
7637 NEW_BBLOCK (cfg, end_bblock);
7638 cfg->bb_exit = end_bblock;
7639 end_bblock->cil_code = NULL;
7640 end_bblock->cil_length = 0;
7641 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7642 g_assert (cfg->num_bblocks == 2);
7644 arg_array = cfg->args;
7646 if (header->num_clauses) {
7647 cfg->spvars = g_hash_table_new (NULL, NULL);
7648 cfg->exvars = g_hash_table_new (NULL, NULL);
7650 /* handle exception clauses */
7651 for (i = 0; i < header->num_clauses; ++i) {
7652 MonoBasicBlock *try_bb;
7653 MonoExceptionClause *clause = &header->clauses [i];
7654 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7656 try_bb->real_offset = clause->try_offset;
7657 try_bb->try_start = TRUE;
7658 try_bb->region = ((i + 1) << 8) | clause->flags;
7659 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7660 tblock->real_offset = clause->handler_offset;
7661 tblock->flags |= BB_EXCEPTION_HANDLER;
7664 * Linking the try block with the EH block hinders inlining as we won't be able to
7665 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7667 if (COMPILE_LLVM (cfg))
7668 link_bblock (cfg, try_bb, tblock);
7670 if (*(ip + clause->handler_offset) == CEE_POP)
7671 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7673 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7674 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7675 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7676 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7677 MONO_ADD_INS (tblock, ins);
7679 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7680 /* finally clauses already have a seq point */
7681 /* seq points for filter clauses are emitted below */
7682 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7683 MONO_ADD_INS (tblock, ins);
7686 /* todo: is a fault block unsafe to optimize? */
7687 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7688 tblock->flags |= BB_EXCEPTION_UNSAFE;
7691 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7693 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7695 /* catch and filter blocks get the exception object on the stack */
7696 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7697 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7699 /* mostly like handle_stack_args (), but just sets the input args */
7700 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7701 tblock->in_scount = 1;
7702 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7703 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7707 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7708 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7709 if (!cfg->compile_llvm) {
7710 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7711 ins->dreg = tblock->in_stack [0]->dreg;
7712 MONO_ADD_INS (tblock, ins);
7715 MonoInst *dummy_use;
7718 * Add a dummy use for the exvar so its liveness info will be
7721 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7724 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7725 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7726 MONO_ADD_INS (tblock, ins);
7729 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7730 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7731 tblock->flags |= BB_EXCEPTION_HANDLER;
7732 tblock->real_offset = clause->data.filter_offset;
7733 tblock->in_scount = 1;
7734 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7735 /* The filter block shares the exvar with the handler block */
7736 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7737 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7738 MONO_ADD_INS (tblock, ins);
7742 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7743 clause->data.catch_class &&
7745 mono_class_check_context_used (clause->data.catch_class)) {
7747 * In shared generic code with catch
7748 * clauses containing type variables
7749 * the exception handling code has to
7750 * be able to get to the rgctx.
7751 * Therefore we have to make sure that
7752 * the vtable/mrgctx argument (for
7753 * static or generic methods) or the
7754 * "this" argument (for non-static
7755 * methods) are live.
7757 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7758 mini_method_get_context (method)->method_inst ||
7759 method->klass->valuetype) {
7760 mono_get_vtable_var (cfg);
7762 MonoInst *dummy_use;
7764 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7769 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7770 cfg->cbb = start_bblock;
7771 cfg->args = arg_array;
7772 mono_save_args (cfg, sig, inline_args);
7775 /* FIRST CODE BLOCK */
7776 NEW_BBLOCK (cfg, tblock);
7777 tblock->cil_code = ip;
7781 ADD_BBLOCK (cfg, tblock);
7783 if (cfg->method == method) {
7784 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7785 if (breakpoint_id) {
7786 MONO_INST_NEW (cfg, ins, OP_BREAK);
7787 MONO_ADD_INS (cfg->cbb, ins);
7791 /* we use a separate basic block for the initialization code */
7792 NEW_BBLOCK (cfg, init_localsbb);
7793 if (cfg->method == method)
7794 cfg->bb_init = init_localsbb;
7795 init_localsbb->real_offset = cfg->real_offset;
7796 start_bblock->next_bb = init_localsbb;
7797 init_localsbb->next_bb = cfg->cbb;
7798 link_bblock (cfg, start_bblock, init_localsbb);
7799 link_bblock (cfg, init_localsbb, cfg->cbb);
7801 cfg->cbb = init_localsbb;
7803 if (cfg->gsharedvt && cfg->method == method) {
7804 MonoGSharedVtMethodInfo *info;
7805 MonoInst *var, *locals_var;
7808 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7809 info->method = cfg->method;
7810 info->count_entries = 16;
7811 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7812 cfg->gsharedvt_info = info;
7814 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7815 /* prevent it from being register allocated */
7816 //var->flags |= MONO_INST_VOLATILE;
7817 cfg->gsharedvt_info_var = var;
7819 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7820 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7822 /* Allocate locals */
7823 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7824 /* prevent it from being register allocated */
7825 //locals_var->flags |= MONO_INST_VOLATILE;
7826 cfg->gsharedvt_locals_var = locals_var;
7828 dreg = alloc_ireg (cfg);
7829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7831 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7832 ins->dreg = locals_var->dreg;
7834 MONO_ADD_INS (cfg->cbb, ins);
7835 cfg->gsharedvt_locals_var_ins = ins;
7837 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7840 ins->flags |= MONO_INST_INIT;
7844 if (mono_security_core_clr_enabled ()) {
7845 /* check if this is native code, e.g. an icall or a p/invoke */
7846 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7847 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7849 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7850 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7852 /* if this is a native call then it can only be JITted from platform code */
7853 if ((icall || pinvk) && method->klass && method->klass->image) {
7854 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7855 MonoException *ex = icall ? mono_get_exception_security () :
7856 mono_get_exception_method_access ();
7857 emit_throw_exception (cfg, ex);
7864 CHECK_CFG_EXCEPTION;
7866 if (header->code_size == 0)
7869 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7874 if (cfg->method == method)
7875 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7877 for (n = 0; n < header->num_locals; ++n) {
7878 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7883 /* We force the vtable variable here for all shared methods
7884 for the possibility that they might show up in a stack
7885 trace where their exact instantiation is needed. */
7886 if (cfg->gshared && method == cfg->method) {
7887 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7888 mini_method_get_context (method)->method_inst ||
7889 method->klass->valuetype) {
7890 mono_get_vtable_var (cfg);
7892 /* FIXME: Is there a better way to do this?
7893 We need the variable live for the duration
7894 of the whole method. */
7895 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7899 /* add a check for this != NULL to inlined methods */
7900 if (is_virtual_call) {
7903 NEW_ARGLOAD (cfg, arg_ins, 0);
7904 MONO_ADD_INS (cfg->cbb, arg_ins);
7905 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7908 skip_dead_blocks = !dont_verify;
7909 if (skip_dead_blocks) {
7910 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7915 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7916 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7919 start_new_bblock = 0;
7921 if (cfg->method == method)
7922 cfg->real_offset = ip - header->code;
7924 cfg->real_offset = inline_offset;
7929 if (start_new_bblock) {
7930 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7931 if (start_new_bblock == 2) {
7932 g_assert (ip == tblock->cil_code);
7934 GET_BBLOCK (cfg, tblock, ip);
7936 cfg->cbb->next_bb = tblock;
7938 start_new_bblock = 0;
7939 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7940 if (cfg->verbose_level > 3)
7941 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7942 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7946 g_slist_free (class_inits);
7949 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7950 link_bblock (cfg, cfg->cbb, tblock);
7951 if (sp != stack_start) {
7952 handle_stack_args (cfg, stack_start, sp - stack_start);
7954 CHECK_UNVERIFIABLE (cfg);
7956 cfg->cbb->next_bb = tblock;
7958 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7959 if (cfg->verbose_level > 3)
7960 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7961 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7964 g_slist_free (class_inits);
7969 if (skip_dead_blocks) {
7970 int ip_offset = ip - header->code;
7972 if (ip_offset == bb->end)
7976 int op_size = mono_opcode_size (ip, end);
7977 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7979 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7981 if (ip_offset + op_size == bb->end) {
7982 MONO_INST_NEW (cfg, ins, OP_NOP);
7983 MONO_ADD_INS (cfg->cbb, ins);
7984 start_new_bblock = 1;
7992 * Sequence points are points where the debugger can place a breakpoint.
7993 * Currently, we generate these automatically at points where the IL
7996 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7998 * Make methods interruptible at the beginning, and at the targets of
7999 * backward branches.
8000 * Also, do this at the start of every bblock in methods with clauses too,
8001 * to be able to handle instructions with imprecise control flow like
8003 * Backward branches are handled at the end of method-to-ir ().
8005 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8006 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8008 /* Avoid sequence points on empty IL like .volatile */
8009 // FIXME: Enable this
8010 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8011 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8012 if ((sp != stack_start) && !sym_seq_point)
8013 ins->flags |= MONO_INST_NONEMPTY_STACK;
8014 MONO_ADD_INS (cfg->cbb, ins);
8017 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8020 cfg->cbb->real_offset = cfg->real_offset;
8022 if ((cfg->method == method) && cfg->coverage_info) {
8023 guint32 cil_offset = ip - header->code;
8024 cfg->coverage_info->data [cil_offset].cil_code = ip;
8026 /* TODO: Use an increment here */
8027 #if defined(TARGET_X86)
8028 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8029 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8031 MONO_ADD_INS (cfg->cbb, ins);
8033 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8034 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8038 if (cfg->verbose_level > 3)
8039 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8043 if (seq_points && !sym_seq_points && sp != stack_start) {
8045 * The C# compiler uses these nops to notify the JIT that it should
8046 * insert seq points.
8048 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8049 MONO_ADD_INS (cfg->cbb, ins);
8051 if (cfg->keep_cil_nops)
8052 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8054 MONO_INST_NEW (cfg, ins, OP_NOP);
8056 MONO_ADD_INS (cfg->cbb, ins);
8059 if (should_insert_brekpoint (cfg->method)) {
8060 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8062 MONO_INST_NEW (cfg, ins, OP_NOP);
8065 MONO_ADD_INS (cfg->cbb, ins);
8071 CHECK_STACK_OVF (1);
8072 n = (*ip)-CEE_LDARG_0;
8074 EMIT_NEW_ARGLOAD (cfg, ins, n);
8082 CHECK_STACK_OVF (1);
8083 n = (*ip)-CEE_LDLOC_0;
8085 EMIT_NEW_LOCLOAD (cfg, ins, n);
8094 n = (*ip)-CEE_STLOC_0;
8097 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8099 emit_stloc_ir (cfg, sp, header, n);
8106 CHECK_STACK_OVF (1);
8109 EMIT_NEW_ARGLOAD (cfg, ins, n);
8115 CHECK_STACK_OVF (1);
8118 NEW_ARGLOADA (cfg, ins, n);
8119 MONO_ADD_INS (cfg->cbb, ins);
8129 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8131 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8136 CHECK_STACK_OVF (1);
8139 EMIT_NEW_LOCLOAD (cfg, ins, n);
8143 case CEE_LDLOCA_S: {
8144 unsigned char *tmp_ip;
8146 CHECK_STACK_OVF (1);
8147 CHECK_LOCAL (ip [1]);
8149 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8155 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8164 CHECK_LOCAL (ip [1]);
8165 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8167 emit_stloc_ir (cfg, sp, header, ip [1]);
8172 CHECK_STACK_OVF (1);
8173 EMIT_NEW_PCONST (cfg, ins, NULL);
8174 ins->type = STACK_OBJ;
8179 CHECK_STACK_OVF (1);
8180 EMIT_NEW_ICONST (cfg, ins, -1);
8193 CHECK_STACK_OVF (1);
8194 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8200 CHECK_STACK_OVF (1);
8202 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8208 CHECK_STACK_OVF (1);
8209 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8215 CHECK_STACK_OVF (1);
8216 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8217 ins->type = STACK_I8;
8218 ins->dreg = alloc_dreg (cfg, STACK_I8);
8220 ins->inst_l = (gint64)read64 (ip);
8221 MONO_ADD_INS (cfg->cbb, ins);
8227 gboolean use_aotconst = FALSE;
8229 #ifdef TARGET_POWERPC
8230 /* FIXME: Clean this up */
8231 if (cfg->compile_aot)
8232 use_aotconst = TRUE;
8235 /* FIXME: we should really allocate this only late in the compilation process */
8236 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8238 CHECK_STACK_OVF (1);
8244 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8246 dreg = alloc_freg (cfg);
8247 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8248 ins->type = cfg->r4_stack_type;
8250 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8251 ins->type = cfg->r4_stack_type;
8252 ins->dreg = alloc_dreg (cfg, STACK_R8);
8254 MONO_ADD_INS (cfg->cbb, ins);
8264 gboolean use_aotconst = FALSE;
8266 #ifdef TARGET_POWERPC
8267 /* FIXME: Clean this up */
8268 if (cfg->compile_aot)
8269 use_aotconst = TRUE;
8272 /* FIXME: we should really allocate this only late in the compilation process */
8273 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8275 CHECK_STACK_OVF (1);
8281 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8283 dreg = alloc_freg (cfg);
8284 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8285 ins->type = STACK_R8;
8287 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8288 ins->type = STACK_R8;
8289 ins->dreg = alloc_dreg (cfg, STACK_R8);
8291 MONO_ADD_INS (cfg->cbb, ins);
8300 MonoInst *temp, *store;
8302 CHECK_STACK_OVF (1);
8306 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8307 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8309 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8312 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8325 if (sp [0]->type == STACK_R8)
8326 /* we need to pop the value from the x86 FP stack */
8327 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8332 MonoMethodSignature *fsig;
8335 INLINE_FAILURE ("jmp");
8336 GSHAREDVT_FAILURE (*ip);
8339 if (stack_start != sp)
8341 token = read32 (ip + 1);
8342 /* FIXME: check the signature matches */
8343 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8346 if (cfg->gshared && mono_method_check_context_used (cmethod))
8347 GENERIC_SHARING_FAILURE (CEE_JMP);
8349 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8351 fsig = mono_method_signature (cmethod);
8352 n = fsig->param_count + fsig->hasthis;
8353 if (cfg->llvm_only) {
8356 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8357 for (i = 0; i < n; ++i)
8358 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8359 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8361 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8362 * have to emit a normal return since llvm expects it.
8365 emit_setret (cfg, ins);
8366 MONO_INST_NEW (cfg, ins, OP_BR);
8367 ins->inst_target_bb = end_bblock;
8368 MONO_ADD_INS (cfg->cbb, ins);
8369 link_bblock (cfg, cfg->cbb, end_bblock);
8372 } else if (cfg->backend->have_op_tail_call) {
8373 /* Handle tail calls similarly to calls */
8376 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8377 call->method = cmethod;
8378 call->tail_call = TRUE;
8379 call->signature = mono_method_signature (cmethod);
8380 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8381 call->inst.inst_p0 = cmethod;
8382 for (i = 0; i < n; ++i)
8383 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8385 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8386 call->vret_var = cfg->vret_addr;
8388 mono_arch_emit_call (cfg, call);
8389 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8390 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8392 for (i = 0; i < num_args; ++i)
8393 /* Prevent arguments from being optimized away */
8394 arg_array [i]->flags |= MONO_INST_VOLATILE;
8396 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8397 ins = (MonoInst*)call;
8398 ins->inst_p0 = cmethod;
8399 MONO_ADD_INS (cfg->cbb, ins);
8403 start_new_bblock = 1;
8408 MonoMethodSignature *fsig;
8411 token = read32 (ip + 1);
8415 //GSHAREDVT_FAILURE (*ip);
8420 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8423 if (method->dynamic && fsig->pinvoke) {
8427 * This is a call through a function pointer using a pinvoke
8428 * signature. Have to create a wrapper and call that instead.
8429 * FIXME: This is very slow, need to create a wrapper at JIT time
8430 * instead based on the signature.
8432 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8433 EMIT_NEW_PCONST (cfg, args [1], fsig);
8435 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8438 n = fsig->param_count + fsig->hasthis;
8442 //g_assert (!virtual_ || fsig->hasthis);
8446 inline_costs += 10 * num_calls++;
8449 * Making generic calls out of gsharedvt methods.
8450 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8451 * patching gshared method addresses into a gsharedvt method.
8453 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8455 * We pass the address to the gsharedvt trampoline in the rgctx reg
8457 MonoInst *callee = addr;
8459 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8461 GSHAREDVT_FAILURE (*ip);
8465 GSHAREDVT_FAILURE (*ip);
8467 addr = emit_get_rgctx_sig (cfg, context_used,
8468 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8469 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8473 /* Prevent inlining of methods with indirect calls */
8474 INLINE_FAILURE ("indirect call");
8476 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8477 MonoJumpInfoType info_type;
8481 * Instead of emitting an indirect call, emit a direct call
8482 * with the contents of the aotconst as the patch info.
8484 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8485 info_type = (MonoJumpInfoType)addr->inst_c1;
8486 info_data = addr->inst_p0;
8488 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8489 info_data = addr->inst_right->inst_left;
8492 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8493 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8496 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8497 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8502 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8506 /* End of call, INS should contain the result of the call, if any */
8508 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8510 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8513 CHECK_CFG_EXCEPTION;
8517 constrained_class = NULL;
8521 case CEE_CALLVIRT: {
8522 MonoInst *addr = NULL;
8523 MonoMethodSignature *fsig = NULL;
8525 int virtual_ = *ip == CEE_CALLVIRT;
8526 gboolean pass_imt_from_rgctx = FALSE;
8527 MonoInst *imt_arg = NULL;
8528 MonoInst *keep_this_alive = NULL;
8529 gboolean pass_vtable = FALSE;
8530 gboolean pass_mrgctx = FALSE;
8531 MonoInst *vtable_arg = NULL;
8532 gboolean check_this = FALSE;
8533 gboolean supported_tail_call = FALSE;
8534 gboolean tail_call = FALSE;
8535 gboolean need_seq_point = FALSE;
8536 guint32 call_opcode = *ip;
8537 gboolean emit_widen = TRUE;
8538 gboolean push_res = TRUE;
8539 gboolean skip_ret = FALSE;
8540 gboolean delegate_invoke = FALSE;
8541 gboolean direct_icall = FALSE;
8542 gboolean constrained_partial_call = FALSE;
8543 MonoMethod *cil_method;
8546 token = read32 (ip + 1);
8550 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8553 cil_method = cmethod;
8555 if (constrained_class) {
8556 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8557 if (!mini_is_gsharedvt_klass (constrained_class)) {
8558 g_assert (!cmethod->klass->valuetype);
8559 if (!mini_type_is_reference (&constrained_class->byval_arg))
8560 constrained_partial_call = TRUE;
8564 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8565 if (cfg->verbose_level > 2)
8566 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8567 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8568 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8570 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8574 if (cfg->verbose_level > 2)
8575 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8577 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8579 * This is needed since get_method_constrained can't find
8580 * the method in klass representing a type var.
8581 * The type var is guaranteed to be a reference type in this
8584 if (!mini_is_gsharedvt_klass (constrained_class))
8585 g_assert (!cmethod->klass->valuetype);
8587 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8592 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8593 /* Use the corresponding method from the base type to avoid boxing */
8594 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8595 g_assert (base_type);
8596 constrained_class = mono_class_from_mono_type (base_type);
8597 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8602 if (!dont_verify && !cfg->skip_visibility) {
8603 MonoMethod *target_method = cil_method;
8604 if (method->is_inflated) {
8605 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8608 if (!mono_method_can_access_method (method_definition, target_method) &&
8609 !mono_method_can_access_method (method, cil_method))
8610 emit_method_access_failure (cfg, method, cil_method);
8613 if (mono_security_core_clr_enabled ())
8614 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8616 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8617 /* MS.NET seems to silently convert this to a callvirt */
8622 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8623 * converts to a callvirt.
8625 * tests/bug-515884.il is an example of this behavior
8627 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8628 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8629 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8633 if (!cmethod->klass->inited)
8634 if (!mono_class_init (cmethod->klass))
8635 TYPE_LOAD_ERROR (cmethod->klass);
8637 fsig = mono_method_signature (cmethod);
8640 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8641 mini_class_is_system_array (cmethod->klass)) {
8642 array_rank = cmethod->klass->rank;
8643 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8644 direct_icall = TRUE;
8645 } else if (fsig->pinvoke) {
8646 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8647 fsig = mono_method_signature (wrapper);
8648 } else if (constrained_class) {
8650 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8654 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8655 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8657 /* See code below */
8658 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8659 MonoBasicBlock *tbb;
8661 GET_BBLOCK (cfg, tbb, ip + 5);
8662 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8664 * We want to extend the try block to cover the call, but we can't do it if the
8665 * call is made directly since it's followed by an exception check.
8667 direct_icall = FALSE;
8671 mono_save_token_info (cfg, image, token, cil_method);
8673 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8674 need_seq_point = TRUE;
8676 /* Don't support calls made using type arguments for now */
8678 if (cfg->gsharedvt) {
8679 if (mini_is_gsharedvt_signature (fsig))
8680 GSHAREDVT_FAILURE (*ip);
8684 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8685 g_assert_not_reached ();
8687 n = fsig->param_count + fsig->hasthis;
8689 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8693 g_assert (!mono_method_check_context_used (cmethod));
8697 //g_assert (!virtual_ || fsig->hasthis);
8702 * We have the `constrained.' prefix opcode.
8704 if (constrained_class) {
8705 if (mini_is_gsharedvt_klass (constrained_class)) {
8706 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8707 /* The 'Own method' case below */
8708 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8709 /* 'The type parameter is instantiated as a reference type' case below. */
8711 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8712 CHECK_CFG_EXCEPTION;
8718 if (constrained_partial_call) {
8719 gboolean need_box = TRUE;
8722 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8723 * called method is not known at compile time either. The called method could end up being
8724 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8725 * to box the receiver.
8726 * A simple solution would be to box always and make a normal virtual call, but that would
8727 * be bad performance wise.
8729 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8731 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8736 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8737 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8738 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8739 ins->klass = constrained_class;
8740 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8741 CHECK_CFG_EXCEPTION;
8742 } else if (need_box) {
8744 MonoBasicBlock *is_ref_bb, *end_bb;
8745 MonoInst *nonbox_call;
8748 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8750 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8751 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8753 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8755 NEW_BBLOCK (cfg, is_ref_bb);
8756 NEW_BBLOCK (cfg, end_bb);
8758 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8763 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8765 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8768 MONO_START_BB (cfg, is_ref_bb);
8769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8770 ins->klass = constrained_class;
8771 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8772 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8776 MONO_START_BB (cfg, end_bb);
8779 nonbox_call->dreg = ins->dreg;
8782 g_assert (mono_class_is_interface (cmethod->klass));
8783 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8784 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8787 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8789 * The type parameter is instantiated as a valuetype,
8790 * but that type doesn't override the method we're
8791 * calling, so we need to box `this'.
8793 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8794 ins->klass = constrained_class;
8795 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8796 CHECK_CFG_EXCEPTION;
8797 } else if (!constrained_class->valuetype) {
8798 int dreg = alloc_ireg_ref (cfg);
8801 * The type parameter is instantiated as a reference
8802 * type. We have a managed pointer on the stack, so
8803 * we need to dereference it here.
8805 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8806 ins->type = STACK_OBJ;
8809 if (cmethod->klass->valuetype) {
8812 /* Interface method */
8815 mono_class_setup_vtable (constrained_class);
8816 CHECK_TYPELOAD (constrained_class);
8817 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8819 TYPE_LOAD_ERROR (constrained_class);
8820 slot = mono_method_get_vtable_slot (cmethod);
8822 TYPE_LOAD_ERROR (cmethod->klass);
8823 cmethod = constrained_class->vtable [ioffset + slot];
8825 if (cmethod->klass == mono_defaults.enum_class) {
8826 /* Enum implements some interfaces, so treat this as the first case */
8827 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8828 ins->klass = constrained_class;
8829 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8830 CHECK_CFG_EXCEPTION;
8835 constrained_class = NULL;
8838 if (check_call_signature (cfg, fsig, sp))
8841 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8842 delegate_invoke = TRUE;
8844 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8845 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8846 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8854 * If the callee is a shared method, then its static cctor
8855 * might not get called after the call was patched.
8857 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8858 emit_class_init (cfg, cmethod->klass);
8859 CHECK_TYPELOAD (cmethod->klass);
8862 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8865 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8867 context_used = mini_method_check_context_used (cfg, cmethod);
8869 if (context_used && mono_class_is_interface (cmethod->klass)) {
8870 /* Generic method interface
8871 calls are resolved via a
8872 helper function and don't
8874 if (!cmethod_context || !cmethod_context->method_inst)
8875 pass_imt_from_rgctx = TRUE;
8879 * If a shared method calls another
8880 * shared method then the caller must
8881 * have a generic sharing context
8882 * because the magic trampoline
8883 * requires it. FIXME: We shouldn't
8884 * have to force the vtable/mrgctx
8885 * variable here. Instead there
8886 * should be a flag in the cfg to
8887 * request a generic sharing context.
8890 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8891 mono_get_vtable_var (cfg);
8896 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8898 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8900 CHECK_TYPELOAD (cmethod->klass);
8901 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8906 g_assert (!vtable_arg);
8908 if (!cfg->compile_aot) {
8910 * emit_get_rgctx_method () calls mono_class_vtable () so check
8911 * for type load errors before.
8913 mono_class_setup_vtable (cmethod->klass);
8914 CHECK_TYPELOAD (cmethod->klass);
8917 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8919 /* !marshalbyref is needed to properly handle generic methods + remoting */
8920 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8921 MONO_METHOD_IS_FINAL (cmethod)) &&
8922 !mono_class_is_marshalbyref (cmethod->klass)) {
8929 if (pass_imt_from_rgctx) {
8930 g_assert (!pass_vtable);
8932 imt_arg = emit_get_rgctx_method (cfg, context_used,
8933 cmethod, MONO_RGCTX_INFO_METHOD);
8937 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8939 /* Calling virtual generic methods */
8940 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8941 !(MONO_METHOD_IS_FINAL (cmethod) &&
8942 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8943 fsig->generic_param_count &&
8944 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8946 MonoInst *this_temp, *this_arg_temp, *store;
8947 MonoInst *iargs [4];
8949 g_assert (fsig->is_inflated);
8951 /* Prevent inlining of methods that contain indirect calls */
8952 INLINE_FAILURE ("virtual generic call");
8954 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8955 GSHAREDVT_FAILURE (*ip);
8957 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8958 g_assert (!imt_arg);
8960 g_assert (cmethod->is_inflated);
8961 imt_arg = emit_get_rgctx_method (cfg, context_used,
8962 cmethod, MONO_RGCTX_INFO_METHOD);
8963 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8965 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8966 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8967 MONO_ADD_INS (cfg->cbb, store);
8969 /* FIXME: This should be a managed pointer */
8970 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8972 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8973 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8974 cmethod, MONO_RGCTX_INFO_METHOD);
8975 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8976 addr = mono_emit_jit_icall (cfg,
8977 mono_helper_compile_generic_method, iargs);
8979 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8981 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8988 * Implement a workaround for the inherent races involved in locking:
8994 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8995 * try block, the Exit () won't be executed, see:
8996 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8997 * To work around this, we extend such try blocks to include the last x bytes
8998 * of the Monitor.Enter () call.
9000 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9001 MonoBasicBlock *tbb;
9003 GET_BBLOCK (cfg, tbb, ip + 5);
9005 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9006 * from Monitor.Enter like ArgumentNullException.
9008 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9009 /* Mark this bblock as needing to be extended */
9010 tbb->extend_try_block = TRUE;
9014 /* Conversion to a JIT intrinsic */
9015 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9016 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9017 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9025 if ((cfg->opt & MONO_OPT_INLINE) &&
9026 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9027 mono_method_check_inlining (cfg, cmethod)) {
9029 gboolean always = FALSE;
9031 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9032 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9033 /* Prevent inlining of methods that call wrappers */
9034 INLINE_FAILURE ("wrapper call");
9035 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9039 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9041 cfg->real_offset += 5;
9043 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9044 /* *sp is already set by inline_method */
9049 inline_costs += costs;
9055 /* Tail recursion elimination */
9056 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9057 gboolean has_vtargs = FALSE;
9060 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9061 INLINE_FAILURE ("tail call");
9063 /* keep it simple */
9064 for (i = fsig->param_count - 1; i >= 0; i--) {
9065 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9070 if (need_seq_point) {
9071 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9072 need_seq_point = FALSE;
9074 for (i = 0; i < n; ++i)
9075 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9076 MONO_INST_NEW (cfg, ins, OP_BR);
9077 MONO_ADD_INS (cfg->cbb, ins);
9078 tblock = start_bblock->out_bb [0];
9079 link_bblock (cfg, cfg->cbb, tblock);
9080 ins->inst_target_bb = tblock;
9081 start_new_bblock = 1;
9083 /* skip the CEE_RET, too */
9084 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9091 inline_costs += 10 * num_calls++;
9094 * Synchronized wrappers.
9095 * It's hard to determine where to replace a method with its synchronized
9096 * wrapper without causing an infinite recursion. The current solution is
9097 * to add the synchronized wrapper in the trampolines, and to
9098 * change the called method to a dummy wrapper, and resolve that wrapper
9099 * to the real method in mono_jit_compile_method ().
9101 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9102 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9103 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9104 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9108 * Making generic calls out of gsharedvt methods.
9109 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9110 * patching gshared method addresses into a gsharedvt method.
9112 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9113 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9114 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9115 MonoRgctxInfoType info_type;
9118 //if (mono_class_is_interface (cmethod->klass))
9119 //GSHAREDVT_FAILURE (*ip);
9120 // disable for possible remoting calls
9121 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9122 GSHAREDVT_FAILURE (*ip);
9123 if (fsig->generic_param_count) {
9124 /* virtual generic call */
9125 g_assert (!imt_arg);
9126 /* Same as the virtual generic case above */
9127 imt_arg = emit_get_rgctx_method (cfg, context_used,
9128 cmethod, MONO_RGCTX_INFO_METHOD);
9129 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9131 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9132 /* This can happen when we call a fully instantiated iface method */
9133 imt_arg = emit_get_rgctx_method (cfg, context_used,
9134 cmethod, MONO_RGCTX_INFO_METHOD);
9139 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9140 keep_this_alive = sp [0];
9142 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9143 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9145 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9146 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9148 if (cfg->llvm_only) {
9149 // FIXME: Avoid initializing vtable_arg
9150 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9152 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9157 /* Generic sharing */
9160 * Use this if the callee is gsharedvt sharable too, since
9161 * at runtime we might find an instantiation so the call cannot
9162 * be patched (the 'no_patch' code path in mini-trampolines.c).
9164 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9165 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9166 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9167 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9168 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9169 INLINE_FAILURE ("gshared");
9171 g_assert (cfg->gshared && cmethod);
9175 * We are compiling a call to a
9176 * generic method from shared code,
9177 * which means that we have to look up
9178 * the method in the rgctx and do an
9182 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9184 if (cfg->llvm_only) {
9185 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9186 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9188 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9189 // FIXME: Avoid initializing imt_arg/vtable_arg
9190 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9192 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9193 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9198 /* Direct calls to icalls */
9200 MonoMethod *wrapper;
9203 /* Inline the wrapper */
9204 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9206 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9207 g_assert (costs > 0);
9208 cfg->real_offset += 5;
9210 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9211 /* *sp is already set by inline_method */
9216 inline_costs += costs;
9225 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9226 MonoInst *val = sp [fsig->param_count];
9228 if (val->type == STACK_OBJ) {
9229 MonoInst *iargs [2];
9234 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9237 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9238 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9239 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9240 emit_write_barrier (cfg, addr, val);
9241 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9242 GSHAREDVT_FAILURE (*ip);
9243 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9244 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9246 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9247 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9248 if (!cmethod->klass->element_class->valuetype && !readonly)
9249 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9250 CHECK_TYPELOAD (cmethod->klass);
9253 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9256 g_assert_not_reached ();
9263 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9267 /* Tail prefix / tail call optimization */
9269 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9270 /* FIXME: runtime generic context pointer for jumps? */
9271 /* FIXME: handle this for generic sharing eventually */
9272 if ((ins_flag & MONO_INST_TAILCALL) &&
9273 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9274 supported_tail_call = TRUE;
9276 if (supported_tail_call) {
9279 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9280 INLINE_FAILURE ("tail call");
9282 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9284 if (cfg->backend->have_op_tail_call) {
9285 /* Handle tail calls similarly to normal calls */
9288 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9290 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9291 call->tail_call = TRUE;
9292 call->method = cmethod;
9293 call->signature = mono_method_signature (cmethod);
9296 * We implement tail calls by storing the actual arguments into the
9297 * argument variables, then emitting a CEE_JMP.
9299 for (i = 0; i < n; ++i) {
9300 /* Prevent argument from being register allocated */
9301 arg_array [i]->flags |= MONO_INST_VOLATILE;
9302 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9304 ins = (MonoInst*)call;
9305 ins->inst_p0 = cmethod;
9306 ins->inst_p1 = arg_array [0];
9307 MONO_ADD_INS (cfg->cbb, ins);
9308 link_bblock (cfg, cfg->cbb, end_bblock);
9309 start_new_bblock = 1;
9311 // FIXME: Eliminate unreachable epilogs
9314 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9315 * only reachable from this call.
9317 GET_BBLOCK (cfg, tblock, ip + 5);
9318 if (tblock == cfg->cbb || tblock->in_count == 0)
9327 * Virtual calls in llvm-only mode.
9329 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9330 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9335 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9336 INLINE_FAILURE ("call");
9337 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9338 imt_arg, vtable_arg);
9340 if (tail_call && !cfg->llvm_only) {
9341 link_bblock (cfg, cfg->cbb, end_bblock);
9342 start_new_bblock = 1;
9344 // FIXME: Eliminate unreachable epilogs
9347 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9348 * only reachable from this call.
9350 GET_BBLOCK (cfg, tblock, ip + 5);
9351 if (tblock == cfg->cbb || tblock->in_count == 0)
9358 /* End of call, INS should contain the result of the call, if any */
9360 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9363 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9368 if (keep_this_alive) {
9369 MonoInst *dummy_use;
9371 /* See mono_emit_method_call_full () */
9372 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9375 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9377 * Clang can convert these calls to tail calls which screw up the stack
9378 * walk. This happens even when the -fno-optimize-sibling-calls
9379 * option is passed to clang.
9380 * Work around this by emitting a dummy call.
9382 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9385 CHECK_CFG_EXCEPTION;
9389 g_assert (*ip == CEE_RET);
9393 constrained_class = NULL;
9395 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9399 if (cfg->method != method) {
9400 /* return from inlined method */
9402 * If in_count == 0, that means the ret is unreachable due to
9403 * being preceded by a throw. In that case, inline_method () will
9404 * handle setting the return value
9405 * (test case: test_0_inline_throw ()).
9407 if (return_var && cfg->cbb->in_count) {
9408 MonoType *ret_type = mono_method_signature (method)->ret;
9414 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9417 //g_assert (returnvar != -1);
9418 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9419 cfg->ret_var_set = TRUE;
9422 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9424 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9428 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9430 if (seq_points && !sym_seq_points) {
9432 * Place a seq point here too even though the IL stack is not
9433 * empty, so a step over on
9436 * will work correctly.
9438 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9439 MONO_ADD_INS (cfg->cbb, ins);
9442 g_assert (!return_var);
9446 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9449 emit_setret (cfg, *sp);
9452 if (sp != stack_start)
9454 MONO_INST_NEW (cfg, ins, OP_BR);
9456 ins->inst_target_bb = end_bblock;
9457 MONO_ADD_INS (cfg->cbb, ins);
9458 link_bblock (cfg, cfg->cbb, end_bblock);
9459 start_new_bblock = 1;
9463 MONO_INST_NEW (cfg, ins, OP_BR);
9465 target = ip + 1 + (signed char)(*ip);
9467 GET_BBLOCK (cfg, tblock, target);
9468 link_bblock (cfg, cfg->cbb, tblock);
9469 ins->inst_target_bb = tblock;
9470 if (sp != stack_start) {
9471 handle_stack_args (cfg, stack_start, sp - stack_start);
9473 CHECK_UNVERIFIABLE (cfg);
9475 MONO_ADD_INS (cfg->cbb, ins);
9476 start_new_bblock = 1;
9477 inline_costs += BRANCH_COST;
9491 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9493 target = ip + 1 + *(signed char*)ip;
9499 inline_costs += BRANCH_COST;
9503 MONO_INST_NEW (cfg, ins, OP_BR);
9506 target = ip + 4 + (gint32)read32(ip);
9508 GET_BBLOCK (cfg, tblock, target);
9509 link_bblock (cfg, cfg->cbb, tblock);
9510 ins->inst_target_bb = tblock;
9511 if (sp != stack_start) {
9512 handle_stack_args (cfg, stack_start, sp - stack_start);
9514 CHECK_UNVERIFIABLE (cfg);
9517 MONO_ADD_INS (cfg->cbb, ins);
9519 start_new_bblock = 1;
9520 inline_costs += BRANCH_COST;
9527 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9528 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9529 guint32 opsize = is_short ? 1 : 4;
9531 CHECK_OPSIZE (opsize);
9533 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9536 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9541 GET_BBLOCK (cfg, tblock, target);
9542 link_bblock (cfg, cfg->cbb, tblock);
9543 GET_BBLOCK (cfg, tblock, ip);
9544 link_bblock (cfg, cfg->cbb, tblock);
9546 if (sp != stack_start) {
9547 handle_stack_args (cfg, stack_start, sp - stack_start);
9548 CHECK_UNVERIFIABLE (cfg);
9551 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9552 cmp->sreg1 = sp [0]->dreg;
9553 type_from_op (cfg, cmp, sp [0], NULL);
9556 #if SIZEOF_REGISTER == 4
9557 if (cmp->opcode == OP_LCOMPARE_IMM) {
9558 /* Convert it to OP_LCOMPARE */
9559 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9560 ins->type = STACK_I8;
9561 ins->dreg = alloc_dreg (cfg, STACK_I8);
9563 MONO_ADD_INS (cfg->cbb, ins);
9564 cmp->opcode = OP_LCOMPARE;
9565 cmp->sreg2 = ins->dreg;
9568 MONO_ADD_INS (cfg->cbb, cmp);
9570 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9571 type_from_op (cfg, ins, sp [0], NULL);
9572 MONO_ADD_INS (cfg->cbb, ins);
9573 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9574 GET_BBLOCK (cfg, tblock, target);
9575 ins->inst_true_bb = tblock;
9576 GET_BBLOCK (cfg, tblock, ip);
9577 ins->inst_false_bb = tblock;
9578 start_new_bblock = 2;
9581 inline_costs += BRANCH_COST;
9596 MONO_INST_NEW (cfg, ins, *ip);
9598 target = ip + 4 + (gint32)read32(ip);
9604 inline_costs += BRANCH_COST;
9608 MonoBasicBlock **targets;
9609 MonoBasicBlock *default_bblock;
9610 MonoJumpInfoBBTable *table;
9611 int offset_reg = alloc_preg (cfg);
9612 int target_reg = alloc_preg (cfg);
9613 int table_reg = alloc_preg (cfg);
9614 int sum_reg = alloc_preg (cfg);
9615 gboolean use_op_switch;
9619 n = read32 (ip + 1);
9622 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9626 CHECK_OPSIZE (n * sizeof (guint32));
9627 target = ip + n * sizeof (guint32);
9629 GET_BBLOCK (cfg, default_bblock, target);
9630 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9632 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9633 for (i = 0; i < n; ++i) {
9634 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9635 targets [i] = tblock;
9636 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9640 if (sp != stack_start) {
9642 * Link the current bb with the targets as well, so handle_stack_args
9643 * will set their in_stack correctly.
9645 link_bblock (cfg, cfg->cbb, default_bblock);
9646 for (i = 0; i < n; ++i)
9647 link_bblock (cfg, cfg->cbb, targets [i]);
9649 handle_stack_args (cfg, stack_start, sp - stack_start);
9651 CHECK_UNVERIFIABLE (cfg);
9653 /* Undo the links */
9654 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9655 for (i = 0; i < n; ++i)
9656 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9662 for (i = 0; i < n; ++i)
9663 link_bblock (cfg, cfg->cbb, targets [i]);
9665 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9666 table->table = targets;
9667 table->table_size = n;
9669 use_op_switch = FALSE;
9671 /* ARM implements SWITCH statements differently */
9672 /* FIXME: Make it use the generic implementation */
9673 if (!cfg->compile_aot)
9674 use_op_switch = TRUE;
9677 if (COMPILE_LLVM (cfg))
9678 use_op_switch = TRUE;
9680 cfg->cbb->has_jump_table = 1;
9682 if (use_op_switch) {
9683 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9684 ins->sreg1 = src1->dreg;
9685 ins->inst_p0 = table;
9686 ins->inst_many_bb = targets;
9687 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9688 MONO_ADD_INS (cfg->cbb, ins);
9690 if (sizeof (gpointer) == 8)
9691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9695 #if SIZEOF_REGISTER == 8
9696 /* The upper word might not be zero, and we add it to a 64 bit address later */
9697 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9700 if (cfg->compile_aot) {
9701 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9703 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9704 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9705 ins->inst_p0 = table;
9706 ins->dreg = table_reg;
9707 MONO_ADD_INS (cfg->cbb, ins);
9710 /* FIXME: Use load_memindex */
9711 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9713 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9715 start_new_bblock = 1;
9716 inline_costs += (BRANCH_COST * 2);
9736 dreg = alloc_freg (cfg);
9739 dreg = alloc_lreg (cfg);
9742 dreg = alloc_ireg_ref (cfg);
9745 dreg = alloc_preg (cfg);
9748 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9749 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9750 if (*ip == CEE_LDIND_R4)
9751 ins->type = cfg->r4_stack_type;
9752 ins->flags |= ins_flag;
9753 MONO_ADD_INS (cfg->cbb, ins);
9755 if (ins_flag & MONO_INST_VOLATILE) {
9756 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9757 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9773 if (ins_flag & MONO_INST_VOLATILE) {
9774 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9775 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9778 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9779 ins->flags |= ins_flag;
9782 MONO_ADD_INS (cfg->cbb, ins);
9784 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9785 emit_write_barrier (cfg, sp [0], sp [1]);
9794 MONO_INST_NEW (cfg, ins, (*ip));
9796 ins->sreg1 = sp [0]->dreg;
9797 ins->sreg2 = sp [1]->dreg;
9798 type_from_op (cfg, ins, sp [0], sp [1]);
9800 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9802 /* Use the immediate opcodes if possible */
9803 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9804 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9805 if (imm_opcode != -1) {
9806 ins->opcode = imm_opcode;
9807 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9810 NULLIFY_INS (sp [1]);
9814 MONO_ADD_INS ((cfg)->cbb, (ins));
9816 *sp++ = mono_decompose_opcode (cfg, ins);
9833 MONO_INST_NEW (cfg, ins, (*ip));
9835 ins->sreg1 = sp [0]->dreg;
9836 ins->sreg2 = sp [1]->dreg;
9837 type_from_op (cfg, ins, sp [0], sp [1]);
9839 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9840 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9842 /* FIXME: Pass opcode to is_inst_imm */
9844 /* Use the immediate opcodes if possible */
9845 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9846 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9847 if (imm_opcode != -1) {
9848 ins->opcode = imm_opcode;
9849 if (sp [1]->opcode == OP_I8CONST) {
9850 #if SIZEOF_REGISTER == 8
9851 ins->inst_imm = sp [1]->inst_l;
9853 ins->inst_ls_word = sp [1]->inst_ls_word;
9854 ins->inst_ms_word = sp [1]->inst_ms_word;
9858 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9861 /* Might be followed by an instruction added by add_widen_op */
9862 if (sp [1]->next == NULL)
9863 NULLIFY_INS (sp [1]);
9866 MONO_ADD_INS ((cfg)->cbb, (ins));
9868 *sp++ = mono_decompose_opcode (cfg, ins);
9881 case CEE_CONV_OVF_I8:
9882 case CEE_CONV_OVF_U8:
9886 /* Special case this earlier so we have long constants in the IR */
9887 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9888 int data = sp [-1]->inst_c0;
9889 sp [-1]->opcode = OP_I8CONST;
9890 sp [-1]->type = STACK_I8;
9891 #if SIZEOF_REGISTER == 8
9892 if ((*ip) == CEE_CONV_U8)
9893 sp [-1]->inst_c0 = (guint32)data;
9895 sp [-1]->inst_c0 = data;
9897 sp [-1]->inst_ls_word = data;
9898 if ((*ip) == CEE_CONV_U8)
9899 sp [-1]->inst_ms_word = 0;
9901 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9903 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9910 case CEE_CONV_OVF_I4:
9911 case CEE_CONV_OVF_I1:
9912 case CEE_CONV_OVF_I2:
9913 case CEE_CONV_OVF_I:
9914 case CEE_CONV_OVF_U:
9917 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9918 ADD_UNOP (CEE_CONV_OVF_I8);
9925 case CEE_CONV_OVF_U1:
9926 case CEE_CONV_OVF_U2:
9927 case CEE_CONV_OVF_U4:
9930 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9931 ADD_UNOP (CEE_CONV_OVF_U8);
9938 case CEE_CONV_OVF_I1_UN:
9939 case CEE_CONV_OVF_I2_UN:
9940 case CEE_CONV_OVF_I4_UN:
9941 case CEE_CONV_OVF_I8_UN:
9942 case CEE_CONV_OVF_U1_UN:
9943 case CEE_CONV_OVF_U2_UN:
9944 case CEE_CONV_OVF_U4_UN:
9945 case CEE_CONV_OVF_U8_UN:
9946 case CEE_CONV_OVF_I_UN:
9947 case CEE_CONV_OVF_U_UN:
9954 CHECK_CFG_EXCEPTION;
9958 case CEE_ADD_OVF_UN:
9960 case CEE_MUL_OVF_UN:
9962 case CEE_SUB_OVF_UN:
9968 GSHAREDVT_FAILURE (*ip);
9971 token = read32 (ip + 1);
9972 klass = mini_get_class (method, token, generic_context);
9973 CHECK_TYPELOAD (klass);
9975 if (generic_class_is_reference_type (cfg, klass)) {
9976 MonoInst *store, *load;
9977 int dreg = alloc_ireg_ref (cfg);
9979 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9980 load->flags |= ins_flag;
9981 MONO_ADD_INS (cfg->cbb, load);
9983 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9984 store->flags |= ins_flag;
9985 MONO_ADD_INS (cfg->cbb, store);
9987 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9988 emit_write_barrier (cfg, sp [0], sp [1]);
9990 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10002 token = read32 (ip + 1);
10003 klass = mini_get_class (method, token, generic_context);
10004 CHECK_TYPELOAD (klass);
10006 /* Optimize the common ldobj+stloc combination */
10009 loc_index = ip [6];
10016 loc_index = ip [5] - CEE_STLOC_0;
10023 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10024 CHECK_LOCAL (loc_index);
10026 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10027 ins->dreg = cfg->locals [loc_index]->dreg;
10028 ins->flags |= ins_flag;
10031 if (ins_flag & MONO_INST_VOLATILE) {
10032 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10033 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10039 /* Optimize the ldobj+stobj combination */
10040 /* The reference case ends up being a load+store anyway */
10041 /* Skip this if the operation is volatile. */
10042 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10047 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10054 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10055 ins->flags |= ins_flag;
10058 if (ins_flag & MONO_INST_VOLATILE) {
10059 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10060 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10069 CHECK_STACK_OVF (1);
10071 n = read32 (ip + 1);
10073 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10074 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10075 ins->type = STACK_OBJ;
10078 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10079 MonoInst *iargs [1];
10080 char *str = (char *)mono_method_get_wrapper_data (method, n);
10082 if (cfg->compile_aot)
10083 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10085 EMIT_NEW_PCONST (cfg, iargs [0], str);
10086 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10088 if (cfg->opt & MONO_OPT_SHARED) {
10089 MonoInst *iargs [3];
10091 if (cfg->compile_aot) {
10092 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10094 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10095 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10096 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10097 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10098 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10101 if (cfg->cbb->out_of_line) {
10102 MonoInst *iargs [2];
10104 if (image == mono_defaults.corlib) {
10106 * Avoid relocations in AOT and save some space by using a
10107 * version of helper_ldstr specialized to mscorlib.
10109 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10110 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10112 /* Avoid creating the string object */
10113 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10114 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10115 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10119 if (cfg->compile_aot) {
10120 NEW_LDSTRCONST (cfg, ins, image, n);
10122 MONO_ADD_INS (cfg->cbb, ins);
10125 NEW_PCONST (cfg, ins, NULL);
10126 ins->type = STACK_OBJ;
10127 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10131 OUT_OF_MEMORY_FAILURE;
10134 MONO_ADD_INS (cfg->cbb, ins);
10143 MonoInst *iargs [2];
10144 MonoMethodSignature *fsig;
10147 MonoInst *vtable_arg = NULL;
10150 token = read32 (ip + 1);
10151 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10154 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10157 mono_save_token_info (cfg, image, token, cmethod);
10159 if (!mono_class_init (cmethod->klass))
10160 TYPE_LOAD_ERROR (cmethod->klass);
10162 context_used = mini_method_check_context_used (cfg, cmethod);
10164 if (mono_security_core_clr_enabled ())
10165 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10167 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10168 emit_class_init (cfg, cmethod->klass);
10169 CHECK_TYPELOAD (cmethod->klass);
10173 if (cfg->gsharedvt) {
10174 if (mini_is_gsharedvt_variable_signature (sig))
10175 GSHAREDVT_FAILURE (*ip);
10179 n = fsig->param_count;
10183 * Generate smaller code for the common newobj <exception> instruction in
10184 * argument checking code.
10186 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10187 is_exception_class (cmethod->klass) && n <= 2 &&
10188 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10189 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10190 MonoInst *iargs [3];
10194 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10197 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10200 iargs [1] = sp [0];
10201 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10204 iargs [1] = sp [0];
10205 iargs [2] = sp [1];
10206 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10209 g_assert_not_reached ();
10217 /* move the args to allow room for 'this' in the first position */
10223 /* check_call_signature () requires sp[0] to be set */
10224 this_ins.type = STACK_OBJ;
10225 sp [0] = &this_ins;
10226 if (check_call_signature (cfg, fsig, sp))
10231 if (mini_class_is_system_array (cmethod->klass)) {
10232 *sp = emit_get_rgctx_method (cfg, context_used,
10233 cmethod, MONO_RGCTX_INFO_METHOD);
10235 /* Avoid varargs in the common case */
10236 if (fsig->param_count == 1)
10237 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10238 else if (fsig->param_count == 2)
10239 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10240 else if (fsig->param_count == 3)
10241 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10242 else if (fsig->param_count == 4)
10243 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10245 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10246 } else if (cmethod->string_ctor) {
10247 g_assert (!context_used);
10248 g_assert (!vtable_arg);
10249 /* we simply pass a null pointer */
10250 EMIT_NEW_PCONST (cfg, *sp, NULL);
10251 /* now call the string ctor */
10252 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10254 if (cmethod->klass->valuetype) {
10255 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10256 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10257 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10262 * The code generated by mini_emit_virtual_call () expects
10263 * iargs [0] to be a boxed instance, but luckily the vcall
10264 * will be transformed into a normal call there.
10266 } else if (context_used) {
10267 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10270 MonoVTable *vtable = NULL;
10272 if (!cfg->compile_aot)
10273 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10274 CHECK_TYPELOAD (cmethod->klass);
10277 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10278 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10279 * As a workaround, we call class cctors before allocating objects.
10281 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10282 emit_class_init (cfg, cmethod->klass);
10283 if (cfg->verbose_level > 2)
10284 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10285 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10288 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10291 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10294 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10296 /* Now call the actual ctor */
10297 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10298 CHECK_CFG_EXCEPTION;
10301 if (alloc == NULL) {
10303 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10304 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10312 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10313 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10316 case CEE_CASTCLASS:
10321 token = read32 (ip + 1);
10322 klass = mini_get_class (method, token, generic_context);
10323 CHECK_TYPELOAD (klass);
10324 if (sp [0]->type != STACK_OBJ)
10327 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10328 ins->dreg = alloc_preg (cfg);
10329 ins->sreg1 = (*sp)->dreg;
10330 ins->klass = klass;
10331 ins->type = STACK_OBJ;
10332 MONO_ADD_INS (cfg->cbb, ins);
10334 CHECK_CFG_EXCEPTION;
10338 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10341 case CEE_UNBOX_ANY: {
10342 MonoInst *res, *addr;
10347 token = read32 (ip + 1);
10348 klass = mini_get_class (method, token, generic_context);
10349 CHECK_TYPELOAD (klass);
10351 mono_save_token_info (cfg, image, token, klass);
10353 context_used = mini_class_check_context_used (cfg, klass);
10355 if (mini_is_gsharedvt_klass (klass)) {
10356 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10358 } else if (generic_class_is_reference_type (cfg, klass)) {
10359 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10360 EMIT_NEW_PCONST (cfg, res, NULL);
10361 res->type = STACK_OBJ;
10363 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10364 res->dreg = alloc_preg (cfg);
10365 res->sreg1 = (*sp)->dreg;
10366 res->klass = klass;
10367 res->type = STACK_OBJ;
10368 MONO_ADD_INS (cfg->cbb, res);
10369 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10371 } else if (mono_class_is_nullable (klass)) {
10372 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10374 addr = handle_unbox (cfg, klass, sp, context_used);
10376 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10387 MonoClass *enum_class;
10388 MonoMethod *has_flag;
10394 token = read32 (ip + 1);
10395 klass = mini_get_class (method, token, generic_context);
10396 CHECK_TYPELOAD (klass);
10398 mono_save_token_info (cfg, image, token, klass);
10400 context_used = mini_class_check_context_used (cfg, klass);
10402 if (generic_class_is_reference_type (cfg, klass)) {
10408 if (klass == mono_defaults.void_class)
10410 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10412 /* frequent check in generic code: box (struct), brtrue */
10417 * <push int/long ptr>
10420 * constrained. MyFlags
10421 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10423 * If we find this sequence and the operand types on box and constrained
10424 * are equal, we can emit a specialized instruction sequence instead of
10425 * the very slow HasFlag () call.
10427 if ((cfg->opt & MONO_OPT_INTRINS) &&
10428 /* Cheap checks first. */
10429 ip + 5 + 6 + 5 < end &&
10430 ip [5] == CEE_PREFIX1 &&
10431 ip [6] == CEE_CONSTRAINED_ &&
10432 ip [11] == CEE_CALLVIRT &&
10433 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10434 mono_class_is_enum (klass) &&
10435 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10436 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10437 has_flag->klass == mono_defaults.enum_class &&
10438 !strcmp (has_flag->name, "HasFlag") &&
10439 has_flag->signature->hasthis &&
10440 has_flag->signature->param_count == 1) {
10441 CHECK_TYPELOAD (enum_class);
10443 if (enum_class == klass) {
10444 MonoInst *enum_this, *enum_flag;
10449 enum_this = sp [0];
10450 enum_flag = sp [1];
10452 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10457 // FIXME: LLVM can't handle the inconsistent bb linking
10458 if (!mono_class_is_nullable (klass) &&
10459 !mini_is_gsharedvt_klass (klass) &&
10460 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10461 (ip [5] == CEE_BRTRUE ||
10462 ip [5] == CEE_BRTRUE_S ||
10463 ip [5] == CEE_BRFALSE ||
10464 ip [5] == CEE_BRFALSE_S)) {
10465 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10467 MonoBasicBlock *true_bb, *false_bb;
10471 if (cfg->verbose_level > 3) {
10472 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10473 printf ("<box+brtrue opt>\n");
10478 case CEE_BRFALSE_S:
10481 target = ip + 1 + (signed char)(*ip);
10488 target = ip + 4 + (gint)(read32 (ip));
10492 g_assert_not_reached ();
10496 * We need to link both bblocks, since it is needed for handling stack
10497 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10498 * Branching to only one of them would lead to inconsistencies, so
10499 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10501 GET_BBLOCK (cfg, true_bb, target);
10502 GET_BBLOCK (cfg, false_bb, ip);
10504 mono_link_bblock (cfg, cfg->cbb, true_bb);
10505 mono_link_bblock (cfg, cfg->cbb, false_bb);
10507 if (sp != stack_start) {
10508 handle_stack_args (cfg, stack_start, sp - stack_start);
10510 CHECK_UNVERIFIABLE (cfg);
10513 if (COMPILE_LLVM (cfg)) {
10514 dreg = alloc_ireg (cfg);
10515 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10518 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10520 /* The JIT can't eliminate the iconst+compare */
10521 MONO_INST_NEW (cfg, ins, OP_BR);
10522 ins->inst_target_bb = is_true ? true_bb : false_bb;
10523 MONO_ADD_INS (cfg->cbb, ins);
10526 start_new_bblock = 1;
10530 *sp++ = handle_box (cfg, val, klass, context_used);
10532 CHECK_CFG_EXCEPTION;
10541 token = read32 (ip + 1);
10542 klass = mini_get_class (method, token, generic_context);
10543 CHECK_TYPELOAD (klass);
10545 mono_save_token_info (cfg, image, token, klass);
10547 context_used = mini_class_check_context_used (cfg, klass);
10549 if (mono_class_is_nullable (klass)) {
10552 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10553 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10557 ins = handle_unbox (cfg, klass, sp, context_used);
10570 MonoClassField *field;
10571 #ifndef DISABLE_REMOTING
10575 gboolean is_instance;
10577 gpointer addr = NULL;
10578 gboolean is_special_static;
10580 MonoInst *store_val = NULL;
10581 MonoInst *thread_ins;
10584 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10586 if (op == CEE_STFLD) {
10589 store_val = sp [1];
10594 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10596 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10599 if (op == CEE_STSFLD) {
10602 store_val = sp [0];
10607 token = read32 (ip + 1);
10608 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10609 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10610 klass = field->parent;
10613 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10616 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10617 FIELD_ACCESS_FAILURE (method, field);
10618 mono_class_init (klass);
10620 /* if the class is Critical then transparent code cannot access its fields */
10621 if (!is_instance && mono_security_core_clr_enabled ())
10622 ensure_method_is_allowed_to_access_field (cfg, method, field);
10624 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10625 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10626 if (mono_security_core_clr_enabled ())
10627 ensure_method_is_allowed_to_access_field (cfg, method, field);
10630 ftype = mono_field_get_type (field);
10633 * LDFLD etc. is usable on static fields as well, so convert those cases to
10636 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10648 g_assert_not_reached ();
10650 is_instance = FALSE;
10653 context_used = mini_class_check_context_used (cfg, klass);
10655 /* INSTANCE CASE */
10657 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10658 if (op == CEE_STFLD) {
10659 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10661 #ifndef DISABLE_REMOTING
10662 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10663 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10664 MonoInst *iargs [5];
10666 GSHAREDVT_FAILURE (op);
10668 iargs [0] = sp [0];
10669 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10670 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10671 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10673 iargs [4] = sp [1];
10675 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10676 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10677 iargs, ip, cfg->real_offset, TRUE);
10678 CHECK_CFG_EXCEPTION;
10679 g_assert (costs > 0);
10681 cfg->real_offset += 5;
10683 inline_costs += costs;
10685 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10690 MonoInst *store, *wbarrier_ptr_ins = NULL;
10692 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10694 if (ins_flag & MONO_INST_VOLATILE) {
10695 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10696 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10699 if (mini_is_gsharedvt_klass (klass)) {
10700 MonoInst *offset_ins;
10702 context_used = mini_class_check_context_used (cfg, klass);
10704 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10705 /* The value is offset by 1 */
10706 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10707 dreg = alloc_ireg_mp (cfg);
10708 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10709 wbarrier_ptr_ins = ins;
10710 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10711 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10715 if (sp [0]->opcode != OP_LDADDR)
10716 store->flags |= MONO_INST_FAULT;
10718 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10719 if (mini_is_gsharedvt_klass (klass)) {
10720 g_assert (wbarrier_ptr_ins);
10721 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10723 /* insert call to write barrier */
10727 dreg = alloc_ireg_mp (cfg);
10728 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10729 emit_write_barrier (cfg, ptr, sp [1]);
10733 store->flags |= ins_flag;
10740 #ifndef DISABLE_REMOTING
10741 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10742 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10743 MonoInst *iargs [4];
10745 GSHAREDVT_FAILURE (op);
10747 iargs [0] = sp [0];
10748 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10749 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10750 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10751 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10752 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10753 iargs, ip, cfg->real_offset, TRUE);
10754 CHECK_CFG_EXCEPTION;
10755 g_assert (costs > 0);
10757 cfg->real_offset += 5;
10761 inline_costs += costs;
10763 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10769 if (sp [0]->type == STACK_VTYPE) {
10772 /* Have to compute the address of the variable */
10774 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10776 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10778 g_assert (var->klass == klass);
10780 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10784 if (op == CEE_LDFLDA) {
10785 if (sp [0]->type == STACK_OBJ) {
10786 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10787 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10790 dreg = alloc_ireg_mp (cfg);
10792 if (mini_is_gsharedvt_klass (klass)) {
10793 MonoInst *offset_ins;
10795 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10796 /* The value is offset by 1 */
10797 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10798 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10800 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10802 ins->klass = mono_class_from_mono_type (field->type);
10803 ins->type = STACK_MP;
10808 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10810 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10811 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10820 if (mini_is_gsharedvt_klass (klass)) {
10821 MonoInst *offset_ins;
10823 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10824 /* The value is offset by 1 */
10825 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10826 dreg = alloc_ireg_mp (cfg);
10827 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10828 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10830 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10832 load->flags |= ins_flag;
10833 if (sp [0]->opcode != OP_LDADDR)
10834 load->flags |= MONO_INST_FAULT;
10846 context_used = mini_class_check_context_used (cfg, klass);
10848 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10849 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10853 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10854 * to be called here.
10856 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10857 mono_class_vtable (cfg->domain, klass);
10858 CHECK_TYPELOAD (klass);
10860 mono_domain_lock (cfg->domain);
10861 if (cfg->domain->special_static_fields)
10862 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10863 mono_domain_unlock (cfg->domain);
10865 is_special_static = mono_class_field_is_special_static (field);
10867 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10868 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10872 /* Generate IR to compute the field address */
10873 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10875 * Fast access to TLS data
10876 * Inline version of get_thread_static_data () in
10880 int idx, static_data_reg, array_reg, dreg;
10882 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10883 GSHAREDVT_FAILURE (op);
10885 static_data_reg = alloc_ireg (cfg);
10886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10888 if (cfg->compile_aot) {
10889 int offset_reg, offset2_reg, idx_reg;
10891 /* For TLS variables, this will return the TLS offset */
10892 EMIT_NEW_SFLDACONST (cfg, ins, field);
10893 offset_reg = ins->dreg;
10894 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10895 idx_reg = alloc_ireg (cfg);
10896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10898 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10899 array_reg = alloc_ireg (cfg);
10900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10901 offset2_reg = alloc_ireg (cfg);
10902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10904 dreg = alloc_ireg (cfg);
10905 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10907 offset = (gsize)addr & 0x7fffffff;
10908 idx = offset & 0x3f;
10910 array_reg = alloc_ireg (cfg);
10911 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10912 dreg = alloc_ireg (cfg);
10913 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10915 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10916 (cfg->compile_aot && is_special_static) ||
10917 (context_used && is_special_static)) {
10918 MonoInst *iargs [2];
10920 g_assert (field->parent);
10921 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10922 if (context_used) {
10923 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10924 field, MONO_RGCTX_INFO_CLASS_FIELD);
10926 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10928 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10929 } else if (context_used) {
10930 MonoInst *static_data;
10933 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10934 method->klass->name_space, method->klass->name, method->name,
10935 depth, field->offset);
10938 if (mono_class_needs_cctor_run (klass, method))
10939 emit_class_init (cfg, klass);
10942 * The pointer we're computing here is
10944 * super_info.static_data + field->offset
10946 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10947 klass, MONO_RGCTX_INFO_STATIC_DATA);
10949 if (mini_is_gsharedvt_klass (klass)) {
10950 MonoInst *offset_ins;
10952 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10953 /* The value is offset by 1 */
10954 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10955 dreg = alloc_ireg_mp (cfg);
10956 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10957 } else if (field->offset == 0) {
10960 int addr_reg = mono_alloc_preg (cfg);
10961 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10963 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10964 MonoInst *iargs [2];
10966 g_assert (field->parent);
10967 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10968 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10969 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10971 MonoVTable *vtable = NULL;
10973 if (!cfg->compile_aot)
10974 vtable = mono_class_vtable (cfg->domain, klass);
10975 CHECK_TYPELOAD (klass);
10978 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10979 if (!(g_slist_find (class_inits, klass))) {
10980 emit_class_init (cfg, klass);
10981 if (cfg->verbose_level > 2)
10982 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10983 class_inits = g_slist_prepend (class_inits, klass);
10986 if (cfg->run_cctors) {
10987 /* This makes it so that inlining cannot trigger */
10988 /* .cctors: too many apps depend on them */
10989 /* running with a specific order... */
10991 if (! vtable->initialized)
10992 INLINE_FAILURE ("class init");
10993 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10994 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10995 goto exception_exit;
10999 if (cfg->compile_aot)
11000 EMIT_NEW_SFLDACONST (cfg, ins, field);
11003 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11005 EMIT_NEW_PCONST (cfg, ins, addr);
11008 MonoInst *iargs [1];
11009 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11010 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11014 /* Generate IR to do the actual load/store operation */
11016 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11017 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11018 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11021 if (op == CEE_LDSFLDA) {
11022 ins->klass = mono_class_from_mono_type (ftype);
11023 ins->type = STACK_PTR;
11025 } else if (op == CEE_STSFLD) {
11028 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11029 store->flags |= ins_flag;
11031 gboolean is_const = FALSE;
11032 MonoVTable *vtable = NULL;
11033 gpointer addr = NULL;
11035 if (!context_used) {
11036 vtable = mono_class_vtable (cfg->domain, klass);
11037 CHECK_TYPELOAD (klass);
11039 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11040 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11041 int ro_type = ftype->type;
11043 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11044 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11045 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11048 GSHAREDVT_FAILURE (op);
11050 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11053 case MONO_TYPE_BOOLEAN:
11055 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11059 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11062 case MONO_TYPE_CHAR:
11064 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11068 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11073 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11077 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11082 case MONO_TYPE_PTR:
11083 case MONO_TYPE_FNPTR:
11084 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11085 type_to_eval_stack_type ((cfg), field->type, *sp);
11088 case MONO_TYPE_STRING:
11089 case MONO_TYPE_OBJECT:
11090 case MONO_TYPE_CLASS:
11091 case MONO_TYPE_SZARRAY:
11092 case MONO_TYPE_ARRAY:
11093 if (!mono_gc_is_moving ()) {
11094 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11095 type_to_eval_stack_type ((cfg), field->type, *sp);
11103 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11108 case MONO_TYPE_VALUETYPE:
11118 CHECK_STACK_OVF (1);
11120 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11121 load->flags |= ins_flag;
11127 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11128 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11129 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11140 token = read32 (ip + 1);
11141 klass = mini_get_class (method, token, generic_context);
11142 CHECK_TYPELOAD (klass);
11143 if (ins_flag & MONO_INST_VOLATILE) {
11144 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11145 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11147 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11148 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11149 ins->flags |= ins_flag;
11150 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11151 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11152 /* insert call to write barrier */
11153 emit_write_barrier (cfg, sp [0], sp [1]);
11165 const char *data_ptr;
11167 guint32 field_token;
11173 token = read32 (ip + 1);
11175 klass = mini_get_class (method, token, generic_context);
11176 CHECK_TYPELOAD (klass);
11178 context_used = mini_class_check_context_used (cfg, klass);
11180 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11181 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11182 ins->sreg1 = sp [0]->dreg;
11183 ins->type = STACK_I4;
11184 ins->dreg = alloc_ireg (cfg);
11185 MONO_ADD_INS (cfg->cbb, ins);
11186 *sp = mono_decompose_opcode (cfg, ins);
11189 if (context_used) {
11190 MonoInst *args [3];
11191 MonoClass *array_class = mono_array_class_get (klass, 1);
11192 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11194 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11197 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11198 array_class, MONO_RGCTX_INFO_VTABLE);
11203 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11205 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11207 if (cfg->opt & MONO_OPT_SHARED) {
11208 /* Decompose now to avoid problems with references to the domainvar */
11209 MonoInst *iargs [3];
11211 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11212 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11213 iargs [2] = sp [0];
11215 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11217 /* Decompose later since it is needed by abcrem */
11218 MonoClass *array_type = mono_array_class_get (klass, 1);
11219 mono_class_vtable (cfg->domain, array_type);
11220 CHECK_TYPELOAD (array_type);
11222 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11223 ins->dreg = alloc_ireg_ref (cfg);
11224 ins->sreg1 = sp [0]->dreg;
11225 ins->inst_newa_class = klass;
11226 ins->type = STACK_OBJ;
11227 ins->klass = array_type;
11228 MONO_ADD_INS (cfg->cbb, ins);
11229 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11230 cfg->cbb->has_array_access = TRUE;
11232 /* Needed so mono_emit_load_get_addr () gets called */
11233 mono_get_got_var (cfg);
11243 * we inline/optimize the initialization sequence if possible.
11244 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11245 * for small sizes open code the memcpy
11246 * ensure the rva field is big enough
11248 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11249 MonoMethod *memcpy_method = get_memcpy_method ();
11250 MonoInst *iargs [3];
11251 int add_reg = alloc_ireg_mp (cfg);
11253 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11254 if (cfg->compile_aot) {
11255 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11257 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11259 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11260 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11269 if (sp [0]->type != STACK_OBJ)
11272 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11273 ins->dreg = alloc_preg (cfg);
11274 ins->sreg1 = sp [0]->dreg;
11275 ins->type = STACK_I4;
11276 /* This flag will be inherited by the decomposition */
11277 ins->flags |= MONO_INST_FAULT;
11278 MONO_ADD_INS (cfg->cbb, ins);
11279 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11280 cfg->cbb->has_array_access = TRUE;
11288 if (sp [0]->type != STACK_OBJ)
11291 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11293 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11294 CHECK_TYPELOAD (klass);
11295 /* we need to make sure that this array is exactly the type it needs
11296 * to be for correctness. the wrappers are lax with their usage
11297 * so we need to ignore them here
11299 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11300 MonoClass *array_class = mono_array_class_get (klass, 1);
11301 mini_emit_check_array_type (cfg, sp [0], array_class);
11302 CHECK_TYPELOAD (array_class);
11306 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11311 case CEE_LDELEM_I1:
11312 case CEE_LDELEM_U1:
11313 case CEE_LDELEM_I2:
11314 case CEE_LDELEM_U2:
11315 case CEE_LDELEM_I4:
11316 case CEE_LDELEM_U4:
11317 case CEE_LDELEM_I8:
11319 case CEE_LDELEM_R4:
11320 case CEE_LDELEM_R8:
11321 case CEE_LDELEM_REF: {
11327 if (*ip == CEE_LDELEM) {
11329 token = read32 (ip + 1);
11330 klass = mini_get_class (method, token, generic_context);
11331 CHECK_TYPELOAD (klass);
11332 mono_class_init (klass);
11335 klass = array_access_to_klass (*ip);
11337 if (sp [0]->type != STACK_OBJ)
11340 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11342 if (mini_is_gsharedvt_variable_klass (klass)) {
11343 // FIXME-VT: OP_ICONST optimization
11344 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11345 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11346 ins->opcode = OP_LOADV_MEMBASE;
11347 } else if (sp [1]->opcode == OP_ICONST) {
11348 int array_reg = sp [0]->dreg;
11349 int index_reg = sp [1]->dreg;
11350 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11352 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11353 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11355 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11356 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11358 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11359 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11362 if (*ip == CEE_LDELEM)
11369 case CEE_STELEM_I1:
11370 case CEE_STELEM_I2:
11371 case CEE_STELEM_I4:
11372 case CEE_STELEM_I8:
11373 case CEE_STELEM_R4:
11374 case CEE_STELEM_R8:
11375 case CEE_STELEM_REF:
11380 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11382 if (*ip == CEE_STELEM) {
11384 token = read32 (ip + 1);
11385 klass = mini_get_class (method, token, generic_context);
11386 CHECK_TYPELOAD (klass);
11387 mono_class_init (klass);
11390 klass = array_access_to_klass (*ip);
11392 if (sp [0]->type != STACK_OBJ)
11395 emit_array_store (cfg, klass, sp, TRUE);
11397 if (*ip == CEE_STELEM)
11404 case CEE_CKFINITE: {
11408 if (cfg->llvm_only) {
11409 MonoInst *iargs [1];
11411 iargs [0] = sp [0];
11412 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11414 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11415 ins->sreg1 = sp [0]->dreg;
11416 ins->dreg = alloc_freg (cfg);
11417 ins->type = STACK_R8;
11418 MONO_ADD_INS (cfg->cbb, ins);
11420 *sp++ = mono_decompose_opcode (cfg, ins);
11426 case CEE_REFANYVAL: {
11427 MonoInst *src_var, *src;
11429 int klass_reg = alloc_preg (cfg);
11430 int dreg = alloc_preg (cfg);
11432 GSHAREDVT_FAILURE (*ip);
11435 MONO_INST_NEW (cfg, ins, *ip);
11438 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11439 CHECK_TYPELOAD (klass);
11441 context_used = mini_class_check_context_used (cfg, klass);
11444 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11446 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11447 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11450 if (context_used) {
11451 MonoInst *klass_ins;
11453 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11454 klass, MONO_RGCTX_INFO_KLASS);
11457 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11458 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11460 mini_emit_class_check (cfg, klass_reg, klass);
11462 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11463 ins->type = STACK_MP;
11464 ins->klass = klass;
11469 case CEE_MKREFANY: {
11470 MonoInst *loc, *addr;
11472 GSHAREDVT_FAILURE (*ip);
11475 MONO_INST_NEW (cfg, ins, *ip);
11478 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11479 CHECK_TYPELOAD (klass);
11481 context_used = mini_class_check_context_used (cfg, klass);
11483 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11484 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11486 if (context_used) {
11487 MonoInst *const_ins;
11488 int type_reg = alloc_preg (cfg);
11490 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11491 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11492 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11493 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11495 int const_reg = alloc_preg (cfg);
11496 int type_reg = alloc_preg (cfg);
11498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11499 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11501 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11503 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11505 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11506 ins->type = STACK_VTYPE;
11507 ins->klass = mono_defaults.typed_reference_class;
11512 case CEE_LDTOKEN: {
11514 MonoClass *handle_class;
11516 CHECK_STACK_OVF (1);
11519 n = read32 (ip + 1);
11521 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11522 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11523 handle = mono_method_get_wrapper_data (method, n);
11524 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11525 if (handle_class == mono_defaults.typehandle_class)
11526 handle = &((MonoClass*)handle)->byval_arg;
11529 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11534 mono_class_init (handle_class);
11535 if (cfg->gshared) {
11536 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11537 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11538 /* This case handles ldtoken
11539 of an open type, like for
11542 } else if (handle_class == mono_defaults.typehandle_class) {
11543 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11544 } else if (handle_class == mono_defaults.fieldhandle_class)
11545 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11546 else if (handle_class == mono_defaults.methodhandle_class)
11547 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11549 g_assert_not_reached ();
11552 if ((cfg->opt & MONO_OPT_SHARED) &&
11553 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11554 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11555 MonoInst *addr, *vtvar, *iargs [3];
11556 int method_context_used;
11558 method_context_used = mini_method_check_context_used (cfg, method);
11560 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11562 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11563 EMIT_NEW_ICONST (cfg, iargs [1], n);
11564 if (method_context_used) {
11565 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11566 method, MONO_RGCTX_INFO_METHOD);
11567 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11569 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11570 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11572 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11574 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11576 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11578 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11579 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11580 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11581 (cmethod->klass == mono_defaults.systemtype_class) &&
11582 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11583 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11585 mono_class_init (tclass);
11586 if (context_used) {
11587 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11588 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11589 } else if (cfg->compile_aot) {
11590 if (method->wrapper_type) {
11591 error_init (&error); //got to do it since there are multiple conditionals below
11592 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11593 /* Special case for static synchronized wrappers */
11594 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11596 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11597 /* FIXME: n is not a normal token */
11599 EMIT_NEW_PCONST (cfg, ins, NULL);
11602 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11605 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11607 EMIT_NEW_PCONST (cfg, ins, rt);
11609 ins->type = STACK_OBJ;
11610 ins->klass = cmethod->klass;
11613 MonoInst *addr, *vtvar;
11615 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11617 if (context_used) {
11618 if (handle_class == mono_defaults.typehandle_class) {
11619 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11620 mono_class_from_mono_type ((MonoType *)handle),
11621 MONO_RGCTX_INFO_TYPE);
11622 } else if (handle_class == mono_defaults.methodhandle_class) {
11623 ins = emit_get_rgctx_method (cfg, context_used,
11624 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11625 } else if (handle_class == mono_defaults.fieldhandle_class) {
11626 ins = emit_get_rgctx_field (cfg, context_used,
11627 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11629 g_assert_not_reached ();
11631 } else if (cfg->compile_aot) {
11632 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11634 EMIT_NEW_PCONST (cfg, ins, handle);
11636 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11637 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11638 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11648 if (sp [-1]->type != STACK_OBJ)
11651 MONO_INST_NEW (cfg, ins, OP_THROW);
11653 ins->sreg1 = sp [0]->dreg;
11655 cfg->cbb->out_of_line = TRUE;
11656 MONO_ADD_INS (cfg->cbb, ins);
11657 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11658 MONO_ADD_INS (cfg->cbb, ins);
11661 link_bblock (cfg, cfg->cbb, end_bblock);
11662 start_new_bblock = 1;
11663 /* This can complicate code generation for llvm since the return value might not be defined */
11664 if (COMPILE_LLVM (cfg))
11665 INLINE_FAILURE ("throw");
11667 case CEE_ENDFINALLY:
11668 if (!ip_in_finally_clause (cfg, ip - header->code))
11670 /* mono_save_seq_point_info () depends on this */
11671 if (sp != stack_start)
11672 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11673 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11674 MONO_ADD_INS (cfg->cbb, ins);
11676 start_new_bblock = 1;
11679 * Control will leave the method so empty the stack, otherwise
11680 * the next basic block will start with a nonempty stack.
11682 while (sp != stack_start) {
11687 case CEE_LEAVE_S: {
11690 if (*ip == CEE_LEAVE) {
11692 target = ip + 5 + (gint32)read32(ip + 1);
11695 target = ip + 2 + (signed char)(ip [1]);
11698 /* empty the stack */
11699 while (sp != stack_start) {
11704 * If this leave statement is in a catch block, check for a
11705 * pending exception, and rethrow it if necessary.
11706 * We avoid doing this in runtime invoke wrappers, since those are called
11707	 * by native code which expects the wrapper to catch all exceptions.
11709 for (i = 0; i < header->num_clauses; ++i) {
11710 MonoExceptionClause *clause = &header->clauses [i];
11713 * Use <= in the final comparison to handle clauses with multiple
11714 * leave statements, like in bug #78024.
11715 * The ordering of the exception clauses guarantees that we find the
11716 * innermost clause.
11718 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11720 MonoBasicBlock *dont_throw;
11725 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11728 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11730 NEW_BBLOCK (cfg, dont_throw);
11733 * Currently, we always rethrow the abort exception, despite the
11734 * fact that this is not correct. See thread6.cs for an example.
11735 * But propagating the abort exception is more important than
11736	 * getting the semantics right.
11738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11740 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11742 MONO_START_BB (cfg, dont_throw);
11747 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11750 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11752 MonoExceptionClause *clause;
11754 for (tmp = handlers; tmp; tmp = tmp->next) {
11755 clause = (MonoExceptionClause *)tmp->data;
11756 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11758 link_bblock (cfg, cfg->cbb, tblock);
11759 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11760 ins->inst_target_bb = tblock;
11761 ins->inst_eh_block = clause;
11762 MONO_ADD_INS (cfg->cbb, ins);
11763 cfg->cbb->has_call_handler = 1;
11764 if (COMPILE_LLVM (cfg)) {
11765 MonoBasicBlock *target_bb;
11768 * Link the finally bblock with the target, since it will
11769 * conceptually branch there.
11771 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11772 GET_BBLOCK (cfg, target_bb, target);
11773 link_bblock (cfg, tblock, target_bb);
11776 g_list_free (handlers);
11779 MONO_INST_NEW (cfg, ins, OP_BR);
11780 MONO_ADD_INS (cfg->cbb, ins);
11781 GET_BBLOCK (cfg, tblock, target);
11782 link_bblock (cfg, cfg->cbb, tblock);
11783 ins->inst_target_bb = tblock;
11785 start_new_bblock = 1;
11787 if (*ip == CEE_LEAVE)
11796 * Mono specific opcodes
11798 case MONO_CUSTOM_PREFIX: {
11800 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11804 case CEE_MONO_ICALL: {
11806 MonoJitICallInfo *info;
11808 token = read32 (ip + 2);
11809 func = mono_method_get_wrapper_data (method, token);
11810 info = mono_find_jit_icall_by_addr (func);
11812 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11815 CHECK_STACK (info->sig->param_count);
11816 sp -= info->sig->param_count;
11818 ins = mono_emit_jit_icall (cfg, info->func, sp);
11819 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11823 inline_costs += 10 * num_calls++;
11827 case CEE_MONO_LDPTR_CARD_TABLE:
11828 case CEE_MONO_LDPTR_NURSERY_START:
11829 case CEE_MONO_LDPTR_NURSERY_BITS:
11830 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11831 CHECK_STACK_OVF (1);
11834 case CEE_MONO_LDPTR_CARD_TABLE:
11835 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11837 case CEE_MONO_LDPTR_NURSERY_START:
11838 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11840 case CEE_MONO_LDPTR_NURSERY_BITS:
11841 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11843 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11844 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11850 inline_costs += 10 * num_calls++;
11853 case CEE_MONO_LDPTR: {
11856 CHECK_STACK_OVF (1);
11858 token = read32 (ip + 2);
11860 ptr = mono_method_get_wrapper_data (method, token);
11861 EMIT_NEW_PCONST (cfg, ins, ptr);
11864 inline_costs += 10 * num_calls++;
11865 /* Can't embed random pointers into AOT code */
11869 case CEE_MONO_JIT_ICALL_ADDR: {
11870 MonoJitICallInfo *callinfo;
11873 CHECK_STACK_OVF (1);
11875 token = read32 (ip + 2);
11877 ptr = mono_method_get_wrapper_data (method, token);
11878 callinfo = mono_find_jit_icall_by_addr (ptr);
11879 g_assert (callinfo);
11880 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11883 inline_costs += 10 * num_calls++;
11886 case CEE_MONO_ICALL_ADDR: {
11887 MonoMethod *cmethod;
11890 CHECK_STACK_OVF (1);
11892 token = read32 (ip + 2);
11894 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11896 if (cfg->compile_aot) {
11897 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11899 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11900	 * before the call, it's not needed when using direct pinvoke.
11901	 * This is not an optimization, but it's used to avoid looking up pinvokes
11902 * on platforms which don't support dlopen ().
11904 EMIT_NEW_PCONST (cfg, ins, NULL);
11906 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11909 ptr = mono_lookup_internal_call (cmethod);
11911 EMIT_NEW_PCONST (cfg, ins, ptr);
11917 case CEE_MONO_VTADDR: {
11918 MonoInst *src_var, *src;
11924 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11925 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11930 case CEE_MONO_NEWOBJ: {
11931 MonoInst *iargs [2];
11933 CHECK_STACK_OVF (1);
11935 token = read32 (ip + 2);
11936 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11937 mono_class_init (klass);
11938 NEW_DOMAINCONST (cfg, iargs [0]);
11939 MONO_ADD_INS (cfg->cbb, iargs [0]);
11940 NEW_CLASSCONST (cfg, iargs [1], klass);
11941 MONO_ADD_INS (cfg->cbb, iargs [1]);
11942 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11944 inline_costs += 10 * num_calls++;
11947 case CEE_MONO_OBJADDR:
11950 MONO_INST_NEW (cfg, ins, OP_MOVE);
11951 ins->dreg = alloc_ireg_mp (cfg);
11952 ins->sreg1 = sp [0]->dreg;
11953 ins->type = STACK_MP;
11954 MONO_ADD_INS (cfg->cbb, ins);
11958 case CEE_MONO_LDNATIVEOBJ:
11960 * Similar to LDOBJ, but instead load the unmanaged
11961 * representation of the vtype to the stack.
11966 token = read32 (ip + 2);
11967 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11968 g_assert (klass->valuetype);
11969 mono_class_init (klass);
11972 MonoInst *src, *dest, *temp;
11975 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11976 temp->backend.is_pinvoke = 1;
11977 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11978 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11980 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11981 dest->type = STACK_VTYPE;
11982 dest->klass = klass;
11988 case CEE_MONO_RETOBJ: {
11990 * Same as RET, but return the native representation of a vtype
11993 g_assert (cfg->ret);
11994 g_assert (mono_method_signature (method)->pinvoke);
11999 token = read32 (ip + 2);
12000 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12002 if (!cfg->vret_addr) {
12003 g_assert (cfg->ret_var_is_local);
12005 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12007 EMIT_NEW_RETLOADA (cfg, ins);
12009 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12011 if (sp != stack_start)
12014 MONO_INST_NEW (cfg, ins, OP_BR);
12015 ins->inst_target_bb = end_bblock;
12016 MONO_ADD_INS (cfg->cbb, ins);
12017 link_bblock (cfg, cfg->cbb, end_bblock);
12018 start_new_bblock = 1;
12022 case CEE_MONO_SAVE_LMF:
12023 case CEE_MONO_RESTORE_LMF:
12026 case CEE_MONO_CLASSCONST:
12027 CHECK_STACK_OVF (1);
12029 token = read32 (ip + 2);
12030 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12033 inline_costs += 10 * num_calls++;
12035 case CEE_MONO_NOT_TAKEN:
12036 cfg->cbb->out_of_line = TRUE;
12039 case CEE_MONO_TLS: {
12042 CHECK_STACK_OVF (1);
12044 key = (MonoTlsKey)read32 (ip + 2);
12045 g_assert (key < TLS_KEY_NUM);
12047 ins = mono_create_tls_get (cfg, key);
12049 ins->type = STACK_PTR;
12054 case CEE_MONO_DYN_CALL: {
12055 MonoCallInst *call;
12057 /* It would be easier to call a trampoline, but that would put an
12058 * extra frame on the stack, confusing exception handling. So
12059 * implement it inline using an opcode for now.
12062 if (!cfg->dyn_call_var) {
12063 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12064 /* prevent it from being register allocated */
12065 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12068	 /* Has to use a call inst since the local regalloc expects it */
12069 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12070 ins = (MonoInst*)call;
12072 ins->sreg1 = sp [0]->dreg;
12073 ins->sreg2 = sp [1]->dreg;
12074 MONO_ADD_INS (cfg->cbb, ins);
12076 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12079 inline_costs += 10 * num_calls++;
12083 case CEE_MONO_MEMORY_BARRIER: {
12085 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12089 case CEE_MONO_ATOMIC_STORE_I4: {
12090 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12096 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12097 ins->dreg = sp [0]->dreg;
12098 ins->sreg1 = sp [1]->dreg;
12099 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12100 MONO_ADD_INS (cfg->cbb, ins);
12105 case CEE_MONO_JIT_ATTACH: {
12106 MonoInst *args [16], *domain_ins;
12107 MonoInst *ad_ins, *jit_tls_ins;
12108 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12110 g_assert (!mono_threads_is_coop_enabled ());
12112 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12114 EMIT_NEW_PCONST (cfg, ins, NULL);
12115 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12117 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12118 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12120 if (ad_ins && jit_tls_ins) {
12121 NEW_BBLOCK (cfg, next_bb);
12122 NEW_BBLOCK (cfg, call_bb);
12124 if (cfg->compile_aot) {
12125 /* AOT code is only used in the root domain */
12126 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12128 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12130 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12136 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12137 MONO_START_BB (cfg, call_bb);
12140 /* AOT code is only used in the root domain */
12141 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12142 if (cfg->compile_aot) {
12146 * This is called on unattached threads, so it cannot go through the trampoline
12147 * infrastructure. Use an indirect call through a got slot initialized at load time
12150 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12151 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12153 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12155 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12158 MONO_START_BB (cfg, next_bb);
12163 case CEE_MONO_JIT_DETACH: {
12164 MonoInst *args [16];
12166 /* Restore the original domain */
12167 dreg = alloc_ireg (cfg);
12168 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12169 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12173 case CEE_MONO_CALLI_EXTRA_ARG: {
12175 MonoMethodSignature *fsig;
12179 * This is the same as CEE_CALLI, but passes an additional argument
12180 * to the called method in llvmonly mode.
12181 * This is only used by delegate invoke wrappers to call the
12182 * actual delegate method.
12184 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12187 token = read32 (ip + 2);
12195 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12198 if (cfg->llvm_only)
12199 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12201 n = fsig->param_count + fsig->hasthis + 1;
12208 if (cfg->llvm_only) {
12210	 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12211 * cconv. This is set by mono_init_delegate ().
12213 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12214 MonoInst *callee = addr;
12215 MonoInst *call, *localloc_ins;
12216 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12217 int low_bit_reg = alloc_preg (cfg);
12219 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12220 NEW_BBLOCK (cfg, end_bb);
12222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12224 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12226 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12227 addr = emit_get_rgctx_sig (cfg, context_used,
12228 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12230 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12232 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12233 ins->dreg = alloc_preg (cfg);
12234 ins->inst_imm = 2 * SIZEOF_VOID_P;
12235 MONO_ADD_INS (cfg->cbb, ins);
12236 localloc_ins = ins;
12237 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12238 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12239 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12241 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12242 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12244 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12245 MONO_START_BB (cfg, is_gsharedvt_bb);
12246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12247 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12248 ins->dreg = call->dreg;
12250 MONO_START_BB (cfg, end_bb);
12252 /* Caller uses a normal calling conv */
12254 MonoInst *callee = addr;
12255 MonoInst *call, *localloc_ins;
12256 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12257 int low_bit_reg = alloc_preg (cfg);
12259 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12260 NEW_BBLOCK (cfg, end_bb);
12262 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12263 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12264 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12266 /* Normal case: callee uses a normal cconv, no conversion is needed */
12267 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12268 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12269 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12270 MONO_START_BB (cfg, is_gsharedvt_bb);
12271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12272 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12273 MONO_ADD_INS (cfg->cbb, addr);
12275 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12277 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12278 ins->dreg = alloc_preg (cfg);
12279 ins->inst_imm = 2 * SIZEOF_VOID_P;
12280 MONO_ADD_INS (cfg->cbb, ins);
12281 localloc_ins = ins;
12282 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12283 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12284 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12286 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12287 ins->dreg = call->dreg;
12288 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12290 MONO_START_BB (cfg, end_bb);
12293 /* Same as CEE_CALLI */
12294 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12296 * We pass the address to the gsharedvt trampoline in the rgctx reg
12298 MonoInst *callee = addr;
12300 addr = emit_get_rgctx_sig (cfg, context_used,
12301 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12302 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12304 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12308 if (!MONO_TYPE_IS_VOID (fsig->ret))
12309 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12311 CHECK_CFG_EXCEPTION;
12315 constrained_class = NULL;
12318 case CEE_MONO_LDDOMAIN:
12319 CHECK_STACK_OVF (1);
12320 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12324 case CEE_MONO_GET_LAST_ERROR:
12326 CHECK_STACK_OVF (1);
12328 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12329 ins->dreg = alloc_dreg (cfg, STACK_I4);
12330 ins->type = STACK_I4;
12331 MONO_ADD_INS (cfg->cbb, ins);
12337 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12343 case CEE_PREFIX1: {
12346 case CEE_ARGLIST: {
12347 /* somewhat similar to LDTOKEN */
12348 MonoInst *addr, *vtvar;
12349 CHECK_STACK_OVF (1);
12350 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12352 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12353 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12355 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12356 ins->type = STACK_VTYPE;
12357 ins->klass = mono_defaults.argumenthandle_class;
12367 MonoInst *cmp, *arg1, *arg2;
12375 * The following transforms:
12376 * CEE_CEQ into OP_CEQ
12377 * CEE_CGT into OP_CGT
12378 * CEE_CGT_UN into OP_CGT_UN
12379 * CEE_CLT into OP_CLT
12380 * CEE_CLT_UN into OP_CLT_UN
12382 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12384 MONO_INST_NEW (cfg, ins, cmp->opcode);
12385 cmp->sreg1 = arg1->dreg;
12386 cmp->sreg2 = arg2->dreg;
12387 type_from_op (cfg, cmp, arg1, arg2);
12389 add_widen_op (cfg, cmp, &arg1, &arg2);
12390 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12391 cmp->opcode = OP_LCOMPARE;
12392 else if (arg1->type == STACK_R4)
12393 cmp->opcode = OP_RCOMPARE;
12394 else if (arg1->type == STACK_R8)
12395 cmp->opcode = OP_FCOMPARE;
12397 cmp->opcode = OP_ICOMPARE;
12398 MONO_ADD_INS (cfg->cbb, cmp);
12399 ins->type = STACK_I4;
12400 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12401 type_from_op (cfg, ins, arg1, arg2);
12403 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12405 * The backends expect the fceq opcodes to do the
12408 ins->sreg1 = cmp->sreg1;
12409 ins->sreg2 = cmp->sreg2;
12412 MONO_ADD_INS (cfg->cbb, ins);
12418 MonoInst *argconst;
12419 MonoMethod *cil_method;
12421 CHECK_STACK_OVF (1);
12423 n = read32 (ip + 2);
12424 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12427 mono_class_init (cmethod->klass);
12429 mono_save_token_info (cfg, image, n, cmethod);
12431 context_used = mini_method_check_context_used (cfg, cmethod);
12433 cil_method = cmethod;
12434 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12435 emit_method_access_failure (cfg, method, cil_method);
12437 if (mono_security_core_clr_enabled ())
12438 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12441 * Optimize the common case of ldftn+delegate creation
12443 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12444 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12445 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12446 MonoInst *target_ins, *handle_ins;
12447 MonoMethod *invoke;
12448 int invoke_context_used;
12450 invoke = mono_get_delegate_invoke (ctor_method->klass);
12451 if (!invoke || !mono_method_signature (invoke))
12454 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12456 target_ins = sp [-1];
12458 if (mono_security_core_clr_enabled ())
12459 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12461 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12462 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12463 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12464 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12465 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12469 /* FIXME: SGEN support */
12470 if (invoke_context_used == 0 || cfg->llvm_only) {
12472 if (cfg->verbose_level > 3)
12473 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12474 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12477 CHECK_CFG_EXCEPTION;
12487 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12488 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12492 inline_costs += 10 * num_calls++;
12495 case CEE_LDVIRTFTN: {
12496 MonoInst *args [2];
12500 n = read32 (ip + 2);
12501 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12504 mono_class_init (cmethod->klass);
12506 context_used = mini_method_check_context_used (cfg, cmethod);
12508 if (mono_security_core_clr_enabled ())
12509 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12512 * Optimize the common case of ldvirtftn+delegate creation
12514 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12515 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12516 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12517 MonoInst *target_ins, *handle_ins;
12518 MonoMethod *invoke;
12519 int invoke_context_used;
12520 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12522 invoke = mono_get_delegate_invoke (ctor_method->klass);
12523 if (!invoke || !mono_method_signature (invoke))
12526 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12528 target_ins = sp [-1];
12530 if (mono_security_core_clr_enabled ())
12531 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12533 /* FIXME: SGEN support */
12534 if (invoke_context_used == 0 || cfg->llvm_only) {
12536 if (cfg->verbose_level > 3)
12537 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12538 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12541 CHECK_CFG_EXCEPTION;
12554 args [1] = emit_get_rgctx_method (cfg, context_used,
12555 cmethod, MONO_RGCTX_INFO_METHOD);
12558 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12560 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12563 inline_costs += 10 * num_calls++;
12567 CHECK_STACK_OVF (1);
12569 n = read16 (ip + 2);
12571 EMIT_NEW_ARGLOAD (cfg, ins, n);
12576 CHECK_STACK_OVF (1);
12578 n = read16 (ip + 2);
12580 NEW_ARGLOADA (cfg, ins, n);
12581 MONO_ADD_INS (cfg->cbb, ins);
12589 n = read16 (ip + 2);
12591 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12593 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12597 CHECK_STACK_OVF (1);
12599 n = read16 (ip + 2);
12601 EMIT_NEW_LOCLOAD (cfg, ins, n);
12606 unsigned char *tmp_ip;
12607 CHECK_STACK_OVF (1);
12609 n = read16 (ip + 2);
12612 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12618 EMIT_NEW_LOCLOADA (cfg, ins, n);
12627 n = read16 (ip + 2);
12629 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12631 emit_stloc_ir (cfg, sp, header, n);
12635 case CEE_LOCALLOC: {
12637 MonoBasicBlock *non_zero_bb, *end_bb;
12638 int alloc_ptr = alloc_preg (cfg);
12640 if (sp != stack_start)
12642 if (cfg->method != method)
12644 * Inlining this into a loop in a parent could lead to
12645 * stack overflows which is different behavior than the
12646 * non-inlined case, thus disable inlining in this case.
12648 INLINE_FAILURE("localloc");
12650 NEW_BBLOCK (cfg, non_zero_bb);
12651 NEW_BBLOCK (cfg, end_bb);
12653 /* if size != zero */
12654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12657 //size is zero, so result is NULL
12658 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12661 MONO_START_BB (cfg, non_zero_bb);
12662 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12663 ins->dreg = alloc_ptr;
12664 ins->sreg1 = sp [0]->dreg;
12665 ins->type = STACK_PTR;
12666 MONO_ADD_INS (cfg->cbb, ins);
12668 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12670 ins->flags |= MONO_INST_INIT;
12672 MONO_START_BB (cfg, end_bb);
12673 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12674 ins->type = STACK_PTR;
12680 case CEE_ENDFILTER: {
12681 MonoExceptionClause *clause, *nearest;
12686 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12688 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12689 ins->sreg1 = (*sp)->dreg;
12690 MONO_ADD_INS (cfg->cbb, ins);
12691 start_new_bblock = 1;
12695 for (cc = 0; cc < header->num_clauses; ++cc) {
12696 clause = &header->clauses [cc];
12697 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12698 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12699 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12702 g_assert (nearest);
12703 if ((ip - header->code) != nearest->handler_offset)
12708 case CEE_UNALIGNED_:
12709 ins_flag |= MONO_INST_UNALIGNED;
12710 /* FIXME: record alignment? we can assume 1 for now */
12714 case CEE_VOLATILE_:
12715 ins_flag |= MONO_INST_VOLATILE;
12719 ins_flag |= MONO_INST_TAILCALL;
12720 cfg->flags |= MONO_CFG_HAS_TAIL;
12721 /* Can't inline tail calls at this time */
12722 inline_costs += 100000;
12729 token = read32 (ip + 2);
12730 klass = mini_get_class (method, token, generic_context);
12731 CHECK_TYPELOAD (klass);
12732 if (generic_class_is_reference_type (cfg, klass))
12733 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12735 mini_emit_initobj (cfg, *sp, NULL, klass);
12739 case CEE_CONSTRAINED_:
12741 token = read32 (ip + 2);
12742 constrained_class = mini_get_class (method, token, generic_context);
12743 CHECK_TYPELOAD (constrained_class);
12747 case CEE_INITBLK: {
12748 MonoInst *iargs [3];
12752 /* Skip optimized paths for volatile operations. */
12753 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12754 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12755 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12756 /* emit_memset only works when val == 0 */
12757 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12760 iargs [0] = sp [0];
12761 iargs [1] = sp [1];
12762 iargs [2] = sp [2];
12763 if (ip [1] == CEE_CPBLK) {
12765 * FIXME: It's unclear whether we should be emitting both the acquire
12766 * and release barriers for cpblk. It is technically both a load and
12767 * store operation, so it seems like that's the sensible thing to do.
12769 * FIXME: We emit full barriers on both sides of the operation for
12770 * simplicity. We should have a separate atomic memcpy method instead.
12772 MonoMethod *memcpy_method = get_memcpy_method ();
12774 if (ins_flag & MONO_INST_VOLATILE)
12775 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12777 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12778 call->flags |= ins_flag;
12780 if (ins_flag & MONO_INST_VOLATILE)
12781 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12783 MonoMethod *memset_method = get_memset_method ();
12784 if (ins_flag & MONO_INST_VOLATILE) {
12785 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12786 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12788 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12789 call->flags |= ins_flag;
12800 ins_flag |= MONO_INST_NOTYPECHECK;
12802 ins_flag |= MONO_INST_NORANGECHECK;
12803 /* we ignore the no-nullcheck for now since we
12804 * really do it explicitly only when doing callvirt->call
12808 case CEE_RETHROW: {
12810 int handler_offset = -1;
12812 for (i = 0; i < header->num_clauses; ++i) {
12813 MonoExceptionClause *clause = &header->clauses [i];
12814 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12815 handler_offset = clause->handler_offset;
12820 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12822 if (handler_offset == -1)
12825 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12826 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12827 ins->sreg1 = load->dreg;
12828 MONO_ADD_INS (cfg->cbb, ins);
12830 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12831 MONO_ADD_INS (cfg->cbb, ins);
12834 link_bblock (cfg, cfg->cbb, end_bblock);
12835 start_new_bblock = 1;
12843 CHECK_STACK_OVF (1);
12845 token = read32 (ip + 2);
12846 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12847 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12850 val = mono_type_size (type, &ialign);
12852 MonoClass *klass = mini_get_class (method, token, generic_context);
12853 CHECK_TYPELOAD (klass);
12855 val = mono_type_size (&klass->byval_arg, &ialign);
12857 if (mini_is_gsharedvt_klass (klass))
12858 GSHAREDVT_FAILURE (*ip);
12860 EMIT_NEW_ICONST (cfg, ins, val);
12865 case CEE_REFANYTYPE: {
12866 MonoInst *src_var, *src;
12868 GSHAREDVT_FAILURE (*ip);
12874 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12876 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12877 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12878 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12883 case CEE_READONLY_:
12896 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12906 g_warning ("opcode 0x%02x not handled", *ip);
12910 if (start_new_bblock != 1)
12913 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12914 if (cfg->cbb->next_bb) {
12915 /* This could already be set because of inlining, #693905 */
12916 MonoBasicBlock *bb = cfg->cbb;
12918 while (bb->next_bb)
12920 bb->next_bb = end_bblock;
12922 cfg->cbb->next_bb = end_bblock;
12925 if (cfg->method == method && cfg->domainvar) {
12927 MonoInst *get_domain;
12929 cfg->cbb = init_localsbb;
12931 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12932 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12933 MONO_ADD_INS (cfg->cbb, store);
12936 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12937 if (cfg->compile_aot)
12938 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12939 mono_get_got_var (cfg);
12942 if (cfg->method == method && cfg->got_var)
12943 mono_emit_load_got_addr (cfg);
12945 if (init_localsbb) {
12946 cfg->cbb = init_localsbb;
12948 for (i = 0; i < header->num_locals; ++i) {
12949 emit_init_local (cfg, i, header->locals [i], init_locals);
12953 if (cfg->init_ref_vars && cfg->method == method) {
12954 /* Emit initialization for ref vars */
12955 // FIXME: Avoid duplication initialization for IL locals.
12956 for (i = 0; i < cfg->num_varinfo; ++i) {
12957 MonoInst *ins = cfg->varinfo [i];
12959 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12960 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12964 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12965 cfg->cbb = init_localsbb;
12966 emit_push_lmf (cfg);
12969 cfg->cbb = init_localsbb;
12970 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12973 MonoBasicBlock *bb;
12976 * Make seq points at backward branch targets interruptable.
12978 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12979 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12980 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12983 /* Add a sequence point for method entry/exit events */
12984 if (seq_points && cfg->gen_sdb_seq_points) {
12985 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12986 MONO_ADD_INS (init_localsbb, ins);
12987 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12988 MONO_ADD_INS (cfg->bb_exit, ins);
12992 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12993 * the code they refer to was dead (#11880).
12995 if (sym_seq_points) {
12996 for (i = 0; i < header->code_size; ++i) {
12997 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13000 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13001 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13008 if (cfg->method == method) {
13009 MonoBasicBlock *bb;
13010 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13011 if (bb == cfg->bb_init)
13014 bb->region = mono_find_block_region (cfg, bb->real_offset);
13016 mono_create_spvar_for_region (cfg, bb->region);
13017 if (cfg->verbose_level > 2)
13018 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13021 MonoBasicBlock *bb;
13022 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13023 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13024 bb->real_offset = inline_offset;
13028 if (inline_costs < 0) {
13031 /* Method is too large */
13032 mname = mono_method_full_name (method, TRUE);
13033 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13037 if ((cfg->verbose_level > 2) && (cfg->method == method))
13038 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13043 g_assert (!mono_error_ok (&cfg->error));
13047 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13051 set_exception_type_from_invalid_il (cfg, method, ip);
13055 g_slist_free (class_inits);
13056 mono_basic_block_free (original_bb);
13057 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13058 if (cfg->exception_type)
13061 return inline_costs;
13065 store_membase_reg_to_store_membase_imm (int opcode)
13068 case OP_STORE_MEMBASE_REG:
13069 return OP_STORE_MEMBASE_IMM;
13070 case OP_STOREI1_MEMBASE_REG:
13071 return OP_STOREI1_MEMBASE_IMM;
13072 case OP_STOREI2_MEMBASE_REG:
13073 return OP_STOREI2_MEMBASE_IMM;
13074 case OP_STOREI4_MEMBASE_REG:
13075 return OP_STOREI4_MEMBASE_IMM;
13076 case OP_STOREI8_MEMBASE_REG:
13077 return OP_STOREI8_MEMBASE_IMM;
13079 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map OPCODE to its immediate-operand variant (e.g. an integer add to
 * OP_IADD_IMM, a long shift to OP_LSHL_IMM, a membase store to its _IMM
 * form).  Target-specific mappings are guarded by TARGET_X86/TARGET_AMD64.
 * NOTE(review): most 'case' labels of this switch fall outside this excerpt;
 * only the return statements are visible here.
 */
mono_op_to_op_imm (int opcode)
		return OP_IADD_IMM;
		return OP_ISUB_IMM;
		return OP_IDIV_IMM;
		return OP_IDIV_UN_IMM;
		return OP_IREM_IMM;
		return OP_IREM_UN_IMM;
		return OP_IMUL_IMM;
		return OP_IAND_IMM;
		return OP_IXOR_IMM;
		return OP_ISHL_IMM;
		return OP_ISHR_IMM;
		return OP_ISHR_UN_IMM;
		/* 64-bit (long) arithmetic/shift variants */
		return OP_LADD_IMM;
		return OP_LSUB_IMM;
		return OP_LAND_IMM;
		return OP_LXOR_IMM;
		return OP_LSHL_IMM;
		return OP_LSHR_IMM;
		return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
		/* only available when the host has 64-bit registers */
		return OP_LREM_IMM;
		/* comparison opcodes */
		return OP_COMPARE_IMM;
		return OP_ICOMPARE_IMM;
		return OP_LCOMPARE_IMM;
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
#if defined(TARGET_X86) || defined (TARGET_AMD64)
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode (e.g. CEE_LDIND_REF -> OP_LOAD_MEMBASE);
 * asserts on anything that is not an ldind opcode.
 * NOTE(review): most 'case' labels fall outside this excerpt.
 */
ldind_to_load_membase (int opcode)
		return OP_LOADI1_MEMBASE;
		return OP_LOADU1_MEMBASE;
		return OP_LOADI2_MEMBASE;
		return OP_LOADU2_MEMBASE;
		return OP_LOADI4_MEMBASE;
		return OP_LOADU4_MEMBASE;
		return OP_LOAD_MEMBASE;
	case CEE_LDIND_REF:
		/* object references are loaded as native-sized words */
		return OP_LOAD_MEMBASE;
		return OP_LOADI8_MEMBASE;
		return OP_LOADR4_MEMBASE;
		return OP_LOADR8_MEMBASE;
		g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode; asserts on anything else.
 * NOTE(review): most 'case' labels fall outside this excerpt.
 */
stind_to_store_membase (int opcode)
		return OP_STOREI1_MEMBASE_REG;
		return OP_STOREI2_MEMBASE_REG;
		return OP_STOREI4_MEMBASE_REG;
	case CEE_STIND_REF:
		/* object references are stored as native-sized words */
		return OP_STORE_MEMBASE_REG;
		return OP_STOREI8_MEMBASE_REG;
		return OP_STORER4_MEMBASE_REG;
		return OP_STORER8_MEMBASE_REG;
		g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Turn a base+offset load into the equivalent absolute-address load
 * (OP_LOAD*_MEMBASE -> OP_LOAD*_MEM) where the target supports one.
 * Returns -1 when no such opcode exists for this target/opcode.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif
	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Fuse an ALU opcode with a following store: given the STORE_OPCODE that
 * consumes the ALU result, return the x86/amd64 read-modify-write opcode
 * which operates directly on the destination memory location, so the
 * intermediate register can be elided.  Per target, only native-word-sized
 * (and, on amd64, 8-byte) stores are eligible — see the guard 'if's below.
 * NOTE(review): the 'case' labels selecting between the returns fall
 * outside this excerpt.
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(TARGET_X86)
	/* only plain word / 4-byte stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	/* amd64 additionally allows 8-byte stores */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
		/* 64-bit variants of the read-modify-write opcodes */
		return OP_AMD64_ADD_MEMBASE_REG;
		return OP_AMD64_SUB_MEMBASE_REG;
		return OP_AMD64_AND_MEMBASE_REG;
		return OP_AMD64_OR_MEMBASE_REG;
		return OP_AMD64_XOR_MEMBASE_REG;
		return OP_AMD64_ADD_MEMBASE_IMM;
		return OP_AMD64_SUB_MEMBASE_IMM;
		return OP_AMD64_AND_MEMBASE_IMM;
		return OP_AMD64_OR_MEMBASE_IMM;
		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1-byte store into an
 * x86/amd64 SETcc-to-memory opcode.  The two identical-looking guards below
 * belong to different (hidden) 'case' labels — presumably the EQ and NE
 * compare results; TODO confirm against the full source.
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load feeding the FIRST source register of OPCODE: given the
 * LOAD_OPCODE that produced sreg1, return an x86/amd64 opcode which reads
 * that operand directly from memory, or (in hidden paths) -1 when fusing
 * is not possible.  NOTE(review): several 'case' labels and one comment
 * terminator fall outside this excerpt.
 */
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* on x86 only native-word / 4-byte loads can be fused */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
		return OP_X86_COMPARE_MEMBASE_REG;
#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* full-width (8-byte, or word on non-ilp32) loads */
	if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_X86_PUSH_MEMBASE;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_AMD64_COMPARE_MEMBASE_IMM;
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		/* under ilp32, a native-word load is 32 bit wide */
		if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load feeding the SECOND source register of OPCODE: given the
 * LOAD_OPCODE that produced sreg2, return an x86/amd64 reg-op-membase
 * opcode which reads that operand from memory directly.
 * NOTE(review): the 'case' labels selecting between the returns fall
 * outside this excerpt.
 */
op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
	/* on x86 only native-word / 4-byte loads can be fused */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
#ifdef TARGET_AMD64
	/* 32-bit-wide operand: 4-byte loads, or a word load under ilp32 */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
		return OP_AMD64_ICOMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
	/* 64-bit-wide operand: 8-byte loads, or a word load when !ilp32 */
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
		return OP_AMD64_COMPARE_REG_MEMBASE;
		return OP_AMD64_ADD_REG_MEMBASE;
		return OP_AMD64_SUB_REG_MEMBASE;
		return OP_AMD64_AND_REG_MEMBASE;
		return OP_AMD64_OR_REG_MEMBASE;
		return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but the #if-guarded (hidden) cases below
 * exclude opcodes which this architecture emulates in software (long
 * shifts, mul/div), so those are not converted to an immediate form.
 */
mono_op_to_op_imm_noemul (int opcode)
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	/* everything else maps the same as the emulation-unaware version */
	return mono_op_to_op_imm (opcode);
13529 * mono_handle_global_vregs:
13531 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13535 mono_handle_global_vregs (MonoCompile *cfg)
13537 gint32 *vreg_to_bb;
13538 MonoBasicBlock *bb;
13541 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13543 #ifdef MONO_ARCH_SIMD_INTRINSICS
13544 if (cfg->uses_simd_intrinsics)
13545 mono_simd_simplify_indirection (cfg);
13548 /* Find local vregs used in more than one bb */
13549 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13550 MonoInst *ins = bb->code;
13551 int block_num = bb->block_num;
13553 if (cfg->verbose_level > 2)
13554 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13557 for (; ins; ins = ins->next) {
13558 const char *spec = INS_INFO (ins->opcode);
13559 int regtype = 0, regindex;
13562 if (G_UNLIKELY (cfg->verbose_level > 2))
13563 mono_print_ins (ins);
13565 g_assert (ins->opcode >= MONO_CEE_LAST);
13567 for (regindex = 0; regindex < 4; regindex ++) {
13570 if (regindex == 0) {
13571 regtype = spec [MONO_INST_DEST];
13572 if (regtype == ' ')
13575 } else if (regindex == 1) {
13576 regtype = spec [MONO_INST_SRC1];
13577 if (regtype == ' ')
13580 } else if (regindex == 2) {
13581 regtype = spec [MONO_INST_SRC2];
13582 if (regtype == ' ')
13585 } else if (regindex == 3) {
13586 regtype = spec [MONO_INST_SRC3];
13587 if (regtype == ' ')
13592 #if SIZEOF_REGISTER == 4
13593 /* In the LLVM case, the long opcodes are not decomposed */
13594 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13596 * Since some instructions reference the original long vreg,
13597 * and some reference the two component vregs, it is quite hard
13598 * to determine when it needs to be global. So be conservative.
13600 if (!get_vreg_to_inst (cfg, vreg)) {
13601 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13603 if (cfg->verbose_level > 2)
13604 printf ("LONG VREG R%d made global.\n", vreg);
13608 * Make the component vregs volatile since the optimizations can
13609 * get confused otherwise.
13611 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13612 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13616 g_assert (vreg != -1);
13618 prev_bb = vreg_to_bb [vreg];
13619 if (prev_bb == 0) {
13620 /* 0 is a valid block num */
13621 vreg_to_bb [vreg] = block_num + 1;
13622 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13623 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13626 if (!get_vreg_to_inst (cfg, vreg)) {
13627 if (G_UNLIKELY (cfg->verbose_level > 2))
13628 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13632 if (vreg_is_ref (cfg, vreg))
13633 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13635 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13638 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13641 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13645 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13648 g_assert_not_reached ();
13652 /* Flag as having been used in more than one bb */
13653 vreg_to_bb [vreg] = -1;
13659 /* If a variable is used in only one bblock, convert it into a local vreg */
13660 for (i = 0; i < cfg->num_varinfo; i++) {
13661 MonoInst *var = cfg->varinfo [i];
13662 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13664 switch (var->type) {
13670 #if SIZEOF_REGISTER == 8
13673 #if !defined(TARGET_X86)
13674 /* Enabling this screws up the fp stack on x86 */
13677 if (mono_arch_is_soft_float ())
13681 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13685 /* Arguments are implicitly global */
13686 /* Putting R4 vars into registers doesn't work currently */
13687 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13688 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13690 * Make that the variable's liveness interval doesn't contain a call, since
13691 * that would cause the lvreg to be spilled, making the whole optimization
13694 /* This is too slow for JIT compilation */
13696 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13698 int def_index, call_index, ins_index;
13699 gboolean spilled = FALSE;
13704 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13705 const char *spec = INS_INFO (ins->opcode);
13707 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13708 def_index = ins_index;
13710 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13711 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13712 if (call_index > def_index) {
13718 if (MONO_IS_CALL (ins))
13719 call_index = ins_index;
13729 if (G_UNLIKELY (cfg->verbose_level > 2))
13730 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13731 var->flags |= MONO_INST_IS_DEAD;
13732 cfg->vreg_to_inst [var->dreg] = NULL;
13739 * Compress the varinfo and vars tables so the liveness computation is faster and
13740 * takes up less space.
13743 for (i = 0; i < cfg->num_varinfo; ++i) {
13744 MonoInst *var = cfg->varinfo [i];
13745 if (pos < i && cfg->locals_start == i)
13746 cfg->locals_start = pos;
13747 if (!(var->flags & MONO_INST_IS_DEAD)) {
13749 cfg->varinfo [pos] = cfg->varinfo [i];
13750 cfg->varinfo [pos]->inst_c0 = pos;
13751 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13752 cfg->vars [pos].idx = pos;
13753 #if SIZEOF_REGISTER == 4
13754 if (cfg->varinfo [pos]->type == STACK_I8) {
13755 /* Modify the two component vars too */
13758 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13759 var1->inst_c0 = pos;
13760 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13761 var1->inst_c0 = pos;
13768 cfg->num_varinfo = pos;
13769 if (cfg->locals_start > cfg->num_varinfo)
13770 cfg->locals_start = cfg->num_varinfo;
 * mono_allocate_gsharedvt_vars:
 *
 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
mono_allocate_gsharedvt_vars (MonoCompile *cfg)
	cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);

	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *ins = cfg->varinfo [i];

		if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
			if (i >= cfg->locals_start) {
				/* Local: allocate an info slot; map vreg -> slot index + 1
				 * (0 is reserved for "not a gsharedvt vreg") */
				idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
				cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
				ins->opcode = OP_GSHAREDVT_LOCAL;
				ins->inst_imm = idx;
				/* Argument (hidden else-branch): mark with -1 and address it
				 * relative to the incoming argument area */
				cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
				ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13807 * mono_spill_global_vars:
13809 * Generate spill code for variables which are not allocated to registers,
13810 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13811 * code is generated which could be optimized by the local optimization passes.
13814 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/*
 * Walk every bblock and rewrite accesses to variables that were not allocated
 * to hardware registers: reads become explicit loads, writes become explicit
 * stores, and OP_LDADDR is decomposed now that variable addresses are known.
 * Also records instruction-precise live ranges as a side effect.
 * NOTE(review): several declarations (spec2, lvregs, lvreg, store_opcode, ...)
 * live on lines elided from this view — confirm against the full file.
 */
13816 MonoBasicBlock *bb;
13818 int orig_next_vreg;
13819 guint32 *vreg_to_lvreg;
13821 guint32 i, lvregs_len, lvregs_size;
13822 gboolean dest_has_lvreg = FALSE;
13823 MonoStackType stacktypes [128];
13824 MonoInst **live_range_start, **live_range_end;
13825 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13827 *need_local_opts = FALSE;
13829 memset (spec2, 0, sizeof (spec2));
13831 /* FIXME: Move this function to mini.c */
/* Map the ins-spec regtype characters ('i'/'l'/'f'/'x') to stack types used
 * when allocating replacement dregs/sregs below. */
13832 stacktypes ['i'] = STACK_PTR;
13833 stacktypes ['l'] = STACK_I8;
13834 stacktypes ['f'] = STACK_R8;
13835 #ifdef MONO_ARCH_SIMD_INTRINSICS
13836 stacktypes ['x'] = STACK_VTYPE;
13839 #if SIZEOF_REGISTER == 4
13840 /* Create MonoInsts for longs */
/* On 32-bit targets a 64-bit variable is modelled as two word-sized halves;
 * give each half (LS/MS) its own OP_REGOFFSET slot derived from the parent. */
13841 for (i = 0; i < cfg->num_varinfo; i++) {
13842 MonoInst *ins = cfg->varinfo [i];
13844 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13845 switch (ins->type) {
13850 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13853 g_assert (ins->opcode == OP_REGOFFSET);
13855 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13857 tree->opcode = OP_REGOFFSET;
13858 tree->inst_basereg = ins->inst_basereg;
13859 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13861 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13863 tree->opcode = OP_REGOFFSET;
13864 tree->inst_basereg = ins->inst_basereg;
13865 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13875 if (cfg->compute_gc_maps) {
13876 /* registers need liveness info even for !non refs */
13877 for (i = 0; i < cfg->num_varinfo; i++) {
13878 MonoInst *ins = cfg->varinfo [i];
13880 if (ins->opcode == OP_REGVAR)
13881 ins->flags |= MONO_INST_GC_TRACK;
13885 /* FIXME: widening and truncation */
13888 * As an optimization, when a variable allocated to the stack is first loaded into
13889 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13890 * the variable again.
13892 orig_next_vreg = cfg->next_vreg;
/* vreg_to_lvreg[vreg] caches the lvreg currently holding the variable's value
 * (0 = none); lvregs lists the entries that must be cleared at cache flushes. */
13893 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13894 lvregs_size = 1024;
13895 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13899 * These arrays contain the first and last instructions accessing a given
13901 * Since we emit bblocks in the same order we process them here, and we
13902 * don't split live ranges, these will precisely describe the live range of
13903 * the variable, i.e. the instruction range where a valid value can be found
13904 * in the variables location.
13905 * The live range is computed using the liveness info computed by the liveness pass.
13906 * We can't use vmv->range, since that is an abstract live range, and we need
13907 * one which is instruction precise.
13908 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13910 /* FIXME: Only do this if debugging info is requested */
13911 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13912 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13913 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13914 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13916 /* Add spill loads/stores */
13917 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13920 if (cfg->verbose_level > 2)
13921 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13923 /* Clear vreg_to_lvreg array */
/* The lvreg cache is only valid within one bblock. */
13924 for (i = 0; i < lvregs_len; i++)
13925 vreg_to_lvreg [lvregs [i]] = 0;
13929 MONO_BB_FOR_EACH_INS (bb, ins) {
13930 const char *spec = INS_INFO (ins->opcode);
13931 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13932 gboolean store, no_lvreg;
13933 int sregs [MONO_MAX_SRC_REGS];
13935 if (G_UNLIKELY (cfg->verbose_level > 2))
13936 mono_print_ins (ins);
13938 if (ins->opcode == OP_NOP)
13942 * We handle LDADDR here as well, since it can only be decomposed
13943 * when variable addresses are known.
13945 if (ins->opcode == OP_LDADDR) {
13946 MonoInst *var = (MonoInst *)ins->inst_p0;
13948 if (var->opcode == OP_VTARG_ADDR) {
13949 /* Happens on SPARC/S390 where vtypes are passed by reference */
13950 MonoInst *vtaddr = var->inst_left;
13951 if (vtaddr->opcode == OP_REGVAR) {
13952 ins->opcode = OP_MOVE;
13953 ins->sreg1 = vtaddr->dreg;
13955 else if (var->inst_left->opcode == OP_REGOFFSET) {
13956 ins->opcode = OP_LOAD_MEMBASE;
13957 ins->inst_basereg = vtaddr->inst_basereg;
13958 ins->inst_offset = vtaddr->inst_offset;
13961 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13962 /* gsharedvt arg passed by ref */
13963 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13965 ins->opcode = OP_LOAD_MEMBASE;
13966 ins->inst_basereg = var->inst_basereg;
13967 ins->inst_offset = var->inst_offset;
13968 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13969 MonoInst *load, *load2, *load3;
13970 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13971 int reg1, reg2, reg3;
13972 MonoInst *info_var = cfg->gsharedvt_info_var;
13973 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13977 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13980 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13982 g_assert (info_var);
13983 g_assert (locals_var);
13985 /* Mark the instruction used to compute the locals var as used */
13986 cfg->gsharedvt_locals_var_ins = NULL;
13988 /* Load the offset */
13989 if (info_var->opcode == OP_REGOFFSET) {
13990 reg1 = alloc_ireg (cfg);
13991 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13992 } else if (info_var->opcode == OP_REGVAR) {
13994 reg1 = info_var->dreg;
13996 g_assert_not_reached ();
13998 reg2 = alloc_ireg (cfg);
13999 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14000 /* Load the locals area address */
14001 reg3 = alloc_ireg (cfg);
14002 if (locals_var->opcode == OP_REGOFFSET) {
14003 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14004 } else if (locals_var->opcode == OP_REGVAR) {
14005 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14007 g_assert_not_reached ();
14009 /* Compute the address */
14010 ins->opcode = OP_PADD;
/* Insert the three helper loads in order just before the rewritten ins. */
14014 mono_bblock_insert_before_ins (bb, ins, load3);
14015 mono_bblock_insert_before_ins (bb, load3, load2);
14017 mono_bblock_insert_before_ins (bb, load2, load);
14019 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is basereg + offset. */
14021 ins->opcode = OP_ADD_IMM;
14022 ins->sreg1 = var->inst_basereg;
14023 ins->inst_imm = var->inst_offset;
14026 *need_local_opts = TRUE;
14027 spec = INS_INFO (ins->opcode);
/* All CIL-level opcodes must have been lowered by now. */
14030 if (ins->opcode < MONO_CEE_LAST) {
14031 mono_print_ins (ins);
14032 g_assert_not_reached ();
14036 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... extra source. Temporarily swap dreg/sreg2 and use the spec2 ins-spec so
 * the base register is treated as a source below; the swap is undone later. */
14040 if (MONO_IS_STORE_MEMBASE (ins)) {
14041 tmp_reg = ins->dreg;
14042 ins->dreg = ins->sreg2;
14043 ins->sreg2 = tmp_reg;
14046 spec2 [MONO_INST_DEST] = ' ';
14047 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14048 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14049 spec2 [MONO_INST_SRC3] = ' ';
14051 } else if (MONO_IS_STORE_MEMINDEX (ins))
14052 g_assert_not_reached ();
14057 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14058 printf ("\t %.3s %d", spec, ins->dreg);
14059 num_sregs = mono_inst_get_src_registers (ins, sregs);
14060 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14061 printf (" %d", sregs [srcindex]);
/*
 * DREG: if the destination is a global variable, either rename it to the
 * variable's hreg, fuse the store into the instruction, or emit an explicit
 * spill store after it.
 */
14068 regtype = spec [MONO_INST_DEST];
14069 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14072 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14073 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14074 MonoInst *store_ins;
14076 MonoInst *def_ins = ins;
14077 int dreg = ins->dreg; /* The original vreg */
14079 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14081 if (var->opcode == OP_REGVAR) {
14082 ins->dreg = var->dreg;
14083 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14085 * Instead of emitting a load+store, use a _membase opcode.
14087 g_assert (var->opcode == OP_REGOFFSET);
14088 if (ins->opcode == OP_MOVE) {
14092 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14093 ins->inst_basereg = var->inst_basereg;
14094 ins->inst_offset = var->inst_offset;
14097 spec = INS_INFO (ins->opcode);
14101 g_assert (var->opcode == OP_REGOFFSET);
14103 prev_dreg = ins->dreg;
14105 /* Invalidate any previous lvreg for this vreg */
14106 vreg_to_lvreg [ins->dreg] = 0;
/* With soft float, R8 values live in integer registers. */
14110 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14112 store_opcode = OP_STOREI8_MEMBASE_REG;
14115 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14117 #if SIZEOF_REGISTER != 8
14118 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: store each half separately. */
14119 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14120 mono_bblock_insert_after_ins (bb, ins, store_ins);
14121 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14122 mono_bblock_insert_after_ins (bb, ins, store_ins);
14123 def_ins = store_ins;
14128 g_assert (store_opcode != OP_STOREV_MEMBASE);
14130 /* Try to fuse the store into the instruction itself */
14131 /* FIXME: Add more instructions */
14132 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14133 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14134 ins->inst_imm = ins->inst_c0;
14135 ins->inst_destbasereg = var->inst_basereg;
14136 ins->inst_offset = var->inst_offset;
14137 spec = INS_INFO (ins->opcode);
14138 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A register-to-register move becomes a direct store to the variable slot. */
14139 ins->opcode = store_opcode;
14140 ins->inst_destbasereg = var->inst_basereg;
14141 ins->inst_offset = var->inst_offset;
/* The rewritten ins is now itself a store membase; apply the same
 * dreg/sreg2 swap + spec2 treatment as for pre-existing stores. */
14145 tmp_reg = ins->dreg;
14146 ins->dreg = ins->sreg2;
14147 ins->sreg2 = tmp_reg;
14150 spec2 [MONO_INST_DEST] = ' ';
14151 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14152 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14153 spec2 [MONO_INST_SRC3] = ' ';
14155 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14156 // FIXME: The backends expect the base reg to be in inst_basereg
14157 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14159 ins->inst_basereg = var->inst_basereg;
14160 ins->inst_offset = var->inst_offset;
14161 spec = INS_INFO (ins->opcode);
14163 /* printf ("INS: "); mono_print_ins (ins); */
14164 /* Create a store instruction */
14165 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14167 /* Insert it after the instruction */
14168 mono_bblock_insert_after_ins (bb, ins, store_ins);
14170 def_ins = store_ins;
14173 * We can't assign ins->dreg to var->dreg here, since the
14174 * sregs could use it. So set a flag, and do it after
14177 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14178 dest_has_lvreg = TRUE;
/* Live-range bookkeeping for the defined vreg. */
14183 if (def_ins && !live_range_start [dreg]) {
14184 live_range_start [dreg] = def_ins;
14185 live_range_start_bb [dreg] = bb;
14188 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14191 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14192 tmp->inst_c1 = dreg;
14193 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/*
 * SREGS: for each source that is a global variable, rename to its hreg,
 * reuse a cached lvreg, fuse the load into the instruction, or emit an
 * explicit spill load before it.
 */
14200 num_sregs = mono_inst_get_src_registers (ins, sregs);
14201 for (srcindex = 0; srcindex < 3; ++srcindex) {
14202 regtype = spec [MONO_INST_SRC1 + srcindex];
14203 sreg = sregs [srcindex];
14205 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14206 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14207 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14208 MonoInst *use_ins = ins;
14209 MonoInst *load_ins;
14210 guint32 load_opcode;
14212 if (var->opcode == OP_REGVAR) {
14213 sregs [srcindex] = var->dreg;
14214 //mono_inst_set_src_registers (ins, sregs);
14215 live_range_end [sreg] = use_ins;
14216 live_range_end_bb [sreg] = bb;
14218 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14221 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14222 /* var->dreg is a hreg */
14223 tmp->inst_c1 = sreg;
14224 mono_bblock_insert_after_ins (bb, ins, tmp);
14230 g_assert (var->opcode == OP_REGOFFSET);
14232 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14234 g_assert (load_opcode != OP_LOADV_MEMBASE);
14236 if (vreg_to_lvreg [sreg]) {
14237 g_assert (vreg_to_lvreg [sreg] != -1);
14239 /* The variable is already loaded to an lvreg */
14240 if (G_UNLIKELY (cfg->verbose_level > 2))
14241 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14242 sregs [srcindex] = vreg_to_lvreg [sreg];
14243 //mono_inst_set_src_registers (ins, sregs);
14247 /* Try to fuse the load into the instruction */
14248 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14249 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14250 sregs [0] = var->inst_basereg;
14251 //mono_inst_set_src_registers (ins, sregs);
14252 ins->inst_offset = var->inst_offset;
14253 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14254 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14255 sregs [1] = var->inst_basereg;
14256 //mono_inst_set_src_registers (ins, sregs);
14257 ins->inst_offset = var->inst_offset;
14259 if (MONO_IS_REAL_MOVE (ins)) {
14260 ins->opcode = OP_NOP;
14263 //printf ("%d ", srcindex); mono_print_ins (ins);
14265 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded value as the variable's lvreg, unless the
 * variable is volatile/indirect or lives on the x86 fp stack. */
14267 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14268 if (var->dreg == prev_dreg) {
14270 * sreg refers to the value loaded by the load
14271 * emitted below, but we need to use ins->dreg
14272 * since it refers to the store emitted earlier.
14276 g_assert (sreg != -1);
14277 vreg_to_lvreg [var->dreg] = sreg;
/* Grow the lvregs list if full (mempool alloc: old block is not freed). */
14278 if (lvregs_len >= lvregs_size) {
14279 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14280 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14281 lvregs = new_lvregs;
14284 lvregs [lvregs_len ++] = var->dreg;
14288 sregs [srcindex] = sreg;
14289 //mono_inst_set_src_registers (ins, sregs);
14291 #if SIZEOF_REGISTER != 8
14292 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: load each half separately. */
14293 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14294 mono_bblock_insert_before_ins (bb, ins, load_ins);
14295 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14296 mono_bblock_insert_before_ins (bb, ins, load_ins);
14297 use_ins = load_ins;
14302 #if SIZEOF_REGISTER == 4
14303 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14305 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14306 mono_bblock_insert_before_ins (bb, ins, load_ins);
14307 use_ins = load_ins;
14311 if (var->dreg < orig_next_vreg) {
14312 live_range_end [var->dreg] = use_ins;
14313 live_range_end_bb [var->dreg] = bb;
14316 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14319 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14320 tmp->inst_c1 = var->dreg;
14321 mono_bblock_insert_after_ins (bb, ins, tmp);
14325 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that all sregs have been
 * processed it is safe to record ins->dreg as the variable's lvreg. */
14327 if (dest_has_lvreg) {
14328 g_assert (ins->dreg != -1);
14329 vreg_to_lvreg [prev_dreg] = ins->dreg;
14330 if (lvregs_len >= lvregs_size) {
14331 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14332 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14333 lvregs = new_lvregs;
14336 lvregs [lvregs_len ++] = prev_dreg;
14337 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap so store opcodes regain their
 * canonical layout for the backends. */
14341 tmp_reg = ins->dreg;
14342 ins->dreg = ins->sreg2;
14343 ins->sreg2 = tmp_reg;
/* Calls can clobber any register, so the lvreg cache becomes invalid. */
14346 if (MONO_IS_CALL (ins)) {
14347 /* Clear vreg_to_lvreg array */
14348 for (i = 0; i < lvregs_len; i++)
14349 vreg_to_lvreg [lvregs [i]] = 0;
14351 } else if (ins->opcode == OP_NOP) {
14353 MONO_INST_NULLIFY_SREGS (ins);
14356 if (cfg->verbose_level > 2)
14357 mono_print_ins_index (1, ins);
14360 /* Extend the live range based on the liveness info */
14361 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14362 for (i = 0; i < cfg->num_varinfo; i ++) {
14363 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14365 if (vreg_is_volatile (cfg, vi->vreg))
14366 /* The liveness info is incomplete */
14369 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14370 /* Live from at least the first ins of this bb */
14371 live_range_start [vi->vreg] = bb->code;
14372 live_range_start_bb [vi->vreg] = bb;
14375 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14376 /* Live at least until the last ins of this bb */
14377 live_range_end [vi->vreg] = bb->last_ins;
14378 live_range_end_bb [vi->vreg] = bb;
14385 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14386 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14388 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14389 for (i = 0; i < cfg->num_varinfo; ++i) {
14390 int vreg = MONO_VARINFO (cfg, i)->vreg;
14393 if (live_range_start [vreg]) {
14394 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14396 ins->inst_c1 = vreg;
14397 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14399 if (live_range_end [vreg]) {
14400 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14402 ins->inst_c1 = vreg;
14403 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14404 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14406 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14411 if (cfg->gsharedvt_locals_var_ins) {
14412 /* Nullify if unused */
14413 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14414 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* The live-range arrays are only needed during this pass. */
14417 g_free (live_range_start);
14418 g_free (live_range_end);
14419 g_free (live_range_start_bb);
14420 g_free (live_range_end_bb);
14426 * - use 'iadd' instead of 'int_add'
14427 * - handling ovf opcodes: decompose in method_to_ir.
14428 * - unify iregs/fregs
14429 * -> partly done, the missing parts are:
14430 * - a more complete unification would involve unifying the hregs as well, so
14431 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14432 * would no longer map to the machine hregs, so the code generators would need to
14433 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14434 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14435 * fp/non-fp branches speeds it up by about 15%.
14436 * - use sext/zext opcodes instead of shifts
14438 * - get rid of TEMPLOADs if possible and use vregs instead
14439 * - clean up usage of OP_P/OP_ opcodes
14440 * - cleanup usage of DUMMY_USE
14441 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14443 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14444 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14445 * - make sure handle_stack_args () is called before the branch is emitted
14446 * - when the new IR is done, get rid of all unused stuff
14447 * - COMPARE/BEQ as separate instructions or unify them ?
14448 * - keeping them separate allows specialized compare instructions like
14449 * compare_imm, compare_membase
14450 * - most back ends unify fp compare+branch, fp compare+ceq
14451 * - integrate mono_save_args into inline_method
14452 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14453 * - handle long shift opts on 32 bit platforms somehow: they require
14454 * 3 sregs (2 for arg1 and 1 for arg2)
14455 * - make byref a 'normal' type.
14456 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14457 * variable if needed.
14458 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14459 * like inline_method.
14460 * - remove inlining restrictions
14461 * - fix LNEG and enable cfold of INEG
14462 * - generalize x86 optimizations like ldelema as a peephole optimization
14463 * - add store_mem_imm for amd64
14464 * - optimize the loading of the interruption flag in the managed->native wrappers
14465 * - avoid special handling of OP_NOP in passes
14466 * - move code inserting instructions into one function/macro.
14467 * - try a coalescing phase after liveness analysis
14468 * - add float -> vreg conversion + local optimizations on !x86
14469 * - figure out how to handle decomposed branches during optimizations, ie.
14470 * compare+branch, op_jump_table+op_br etc.
14471 * - promote RuntimeXHandles to vregs
14472 * - vtype cleanups:
14473 * - add a NEW_VARLOADA_VREG macro
14474 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14475 * accessing vtype fields.
14476 * - get rid of I8CONST on 64 bit platforms
14477 * - dealing with the increase in code size due to branches created during opcode
14479 * - use extended basic blocks
14480 * - all parts of the JIT
14481 * - handle_global_vregs () && local regalloc
14482 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14483 * - sources of increase in code size:
14486 * - isinst and castclass
14487 * - lvregs not allocated to global registers even if used multiple times
14488 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14490 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14491 * - add all micro optimizations from the old JIT
14492 * - put tree optimizations into the deadce pass
14493 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14494 * specific function.
14495 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14496 * fcompare + branchCC.
14497 * - create a helper function for allocating a stack slot, taking into account
14498 * MONO_CFG_HAS_SPILLUP.
14500 * - merge the ia64 switch changes.
14501 * - optimize mono_regstate2_alloc_int/float.
14502 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14503 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14504 * parts of the tree could be separated by other instructions, killing the tree
14505 * arguments, or stores killing loads etc. Also, should we fold loads into other
14506 * instructions if the result of the load is used multiple times ?
14507 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14508 * - LAST MERGE: 108395.
14509 * - when returning vtypes in registers, generate IR and append it to the end of the
14510 * last bb instead of doing it in the epilog.
14511 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14519 - When to decompose opcodes:
14520 - earlier: this makes some optimizations hard to implement, since the low level IR
14521 no longer contains the necessary information. But it is easier to do.
14522 - later: harder to implement, enables more optimizations.
14523 - Branches inside bblocks:
14524 - created when decomposing complex opcodes.
14525 - branches to another bblock: harmless, but not tracked by the branch
14526 optimizations, so need to branch to a label at the start of the bblock.
14527 - branches to inside the same bblock: very problematic, trips up the local
14528 reg allocator. Can be fixed by splitting the current bblock, but that is a
14529 complex operation, since some local vregs can become global vregs etc.
14530 - Local/global vregs:
14531 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14532 local register allocator.
14533 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14534 structure, created by mono_create_var (). Assigned to hregs or the stack by
14535 the global register allocator.
14536 - When to do optimizations like alu->alu_imm:
14537 - earlier -> saves work later on since the IR will be smaller/simpler
14538 - later -> can work on more instructions
14539 - Handling of valuetypes:
14540 - When a vtype is pushed on the stack, a new temporary is created, an
14541 instruction computing its address (LDADDR) is emitted and pushed on
14542 the stack. Need to optimize cases when the vtype is used immediately as in
14543 argument passing, stloc etc.
14544 - Instead of the to_end stuff in the old JIT, simply call the function handling
14545 the values on the stack before emitting the last instruction of the bb.
14548 #else /* !DISABLE_JIT */
14550 MONO_EMPTY_SOURCE_FILE (method_to_ir);
14552 #endif /* !DISABLE_JIT */