2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Maps a MonoType to the register-move opcode used when copying a value of
 * that type between virtual registers. Enums are unwrapped to their base
 * type and generic instances to their container class before re-dispatching.
 * NOTE(review): this excerpt is non-contiguous — several cases and the
 * per-case return statements are missing from view.
 */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
/* 64-bit-only handling for I8/U8 — body elided in this excerpt */
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
/* enum valuetypes are moved as their underlying integral type */
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-backed valuetypes presumably get a dedicated xmove — confirm against full source */
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
252 case MONO_TYPE_GENERICINST:
/* re-dispatch on the generic container's underlying type */
253 type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR are only legal under generic sharing */
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper — dumps a basic block: its number, incoming and outgoing
 * edges (block number and depth-first number), then every instruction in
 * the block via mono_print_ins_index.
 */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* walk the instruction list; -1 means "no index prefix" */
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
/*
 * link_bblock:
 * Adds a CFG edge from 'from' to 'to', updating both from->out_bb and
 * to->in_bb. Each direction first scans the existing edge array to avoid
 * duplicates, then grows the array by one via a mempool allocation and
 * copies the old entries over.
 * NOTE(review): the lines appending the new entry and bumping the counts
 * are not visible in this excerpt.
 */
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* verbose tracing of the edge being added (entry/exit shown specially) */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* bail out early if the out-edge already exists */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* same dance for the reverse (in) direction */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* mono_link_bblock: public wrapper around the static link_bblock. */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
/*
 * mono_find_block_region:
 * Computes the region token for the IL offset 'offset': ((clause_index+1)<<8)
 * OR'd with a MONO_REGION_* kind and the clause flags. Filter ranges are
 * checked first, then handler bodies (finally/fault/catch), then the try
 * range itself. Falls through (to a return not visible here, presumably -1)
 * when the offset is in no clause.
 */
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* filter code lives between filter_offset and handler_offset */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* inside the protected (try) range but not a handler */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collects (as a GList of handler bblocks) every clause of kind 'type'
 * whose protected range contains 'ip' but not 'target' — i.e. the handlers
 * that must run when control transfers from ip to target (leave/branch out
 * of a try). NOTE(review): the return of 'res' is outside this excerpt.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethod *method = cfg->method;
498 MonoMethodHeader *header = mono_method_get_header (method);
499 MonoExceptionClause *clause;
500 MonoBasicBlock *handler;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
/* branch leaves this clause's protected region */
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type) {
509 handler = cfg->cil_offset_to_bb [clause->handler_offset];
511 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 * Returns the stack-pointer spill variable for 'region', creating and
 * caching it in cfg->spvars on first use. MONO_INST_INDIRECT keeps it
 * stack-allocated (not register allocated).
 */
519 mono_create_spvar_for_region (MonoCompile *cfg, int region)
523 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
527 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
528 /* prevent it from being register allocated */
529 var->flags |= MONO_INST_INDIRECT;
531 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/*
 * mono_find_exvar_for_offset:
 * Looks up (without creating) the exception-object variable cached for the
 * handler at IL 'offset'; NULL if none was created yet.
 */
535 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
537 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Returns the exception-object variable for the handler at IL 'offset',
 * creating an object-typed, stack-pinned local and caching it in
 * cfg->exvars on first use. Mirrors mono_create_spvar_for_region.
 */
541 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
545 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
549 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
550 /* prevent it from being register allocated */
551 var->flags |= MONO_INST_INDIRECT;
553 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
559 * Returns the type used in the eval stack when @type is loaded.
560 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 * Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE) and inst->klass
 * according to the MonoType being loaded onto the evaluation stack.
 * Byref types become STACK_MP; enums and generic instances are unwrapped
 * and re-dispatched. NOTE(review): excerpt is non-contiguous — breaks and
 * several case labels are missing from view.
 */
563 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
567 inst->klass = klass = mono_class_from_mono_type (type);
/* byref arguments/locals are managed pointers on the eval stack */
569 inst->type = STACK_MP;
574 switch (type->type) {
576 inst->type = STACK_INV;
580 case MONO_TYPE_BOOLEAN:
586 inst->type = STACK_I4;
591 case MONO_TYPE_FNPTR:
592 inst->type = STACK_PTR;
594 case MONO_TYPE_CLASS:
595 case MONO_TYPE_STRING:
596 case MONO_TYPE_OBJECT:
597 case MONO_TYPE_SZARRAY:
598 case MONO_TYPE_ARRAY:
599 inst->type = STACK_OBJ;
603 inst->type = STACK_I8;
607 inst->type = STACK_R8;
609 case MONO_TYPE_VALUETYPE:
/* enums load as their underlying integral type */
610 if (type->data.klass->enumtype) {
611 type = mono_class_enum_basetype (type->data.klass);
615 inst->type = STACK_VTYPE;
618 case MONO_TYPE_TYPEDBYREF:
619 inst->klass = mono_defaults.typed_reference_class;
620 inst->type = STACK_VTYPE;
622 case MONO_TYPE_GENERICINST:
623 type = &type->data.generic_class->container_class->byval_arg;
626 case MONO_TYPE_MVAR :
627 /* FIXME: all the arguments must be references for now,
628 * later look inside cfg and see if the arg num is
/* VAR/MVAR only appear under generic sharing, where they are references */
631 g_assert (cfg->generic_sharing_context);
632 inst->type = STACK_OBJ;
635 g_error ("unknown type 0x%02x in eval stack type", type->type);
640 * The following tables are used to quickly validate the IL code in type_from_op ().
643 bin_num_table [STACK_MAX] [STACK_MAX] = {
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
656 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
659 /* reduce the size of this table */
661 bin_int_table [STACK_MAX] [STACK_MAX] = {
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
673 bin_comp_table [STACK_MAX] [STACK_MAX] = {
674 /* Inv i L p F & O vt */
676 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
677 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
678 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
679 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
680 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
681 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
682 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
685 /* reduce the size of this table */
687 shift_table [STACK_MAX] [STACK_MAX] = {
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
699 * Tables to map from the non-specific opcode to the matching
700 * type-specific opcode.
702 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
704 binops_op_map [STACK_MAX] = {
705 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
708 /* handles from CEE_NEG to CEE_CONV_U8 */
710 unops_op_map [STACK_MAX] = {
711 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
714 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
716 ovfops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
720 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
722 ovf2ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
726 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
728 ovf3ops_op_map [STACK_MAX] = {
729 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
732 /* handles from CEE_BEQ to CEE_BLT_UN */
734 beqops_op_map [STACK_MAX] = {
735 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
738 /* handles from CEE_CEQ to CEE_CLT_UN */
740 ceqops_op_map [STACK_MAX] = {
741 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
745 * Sets ins->type (the type on the eval stack) according to the
746 * type of the opcode and the arguments to it.
747 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
749 * FIXME: this function sets ins->type unconditionally in some cases, but
750 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 * Given a generic (stack-type-agnostic) opcode in ins->opcode and the
 * stack types of its operands, sets ins->type (the result's eval-stack
 * type, STACK_INV for invalid IL) and specializes ins->opcode to the
 * type-specific variant by adding the appropriate *_op_map offset
 * (binops_op_map, unops_op_map, ovfops_op_map, ...). Comparisons pick
 * OP_LCOMPARE/OP_FCOMPARE/OP_ICOMPARE directly from the operand type.
 * NOTE(review): this excerpt is heavily sampled — many case labels,
 * breaks, and intermediate lines are missing from view.
 */
753 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
755 switch (ins->opcode) {
/* arithmetic binops: result type from bin_num_table, then specialize */
762 /* FIXME: check unverifiable args for STACK_MP */
763 ins->type = bin_num_table [src1->type] [src2->type];
764 ins->opcode += binops_op_map [ins->type];
/* integer-only binops (and/or/xor/...): bin_int_table */
771 ins->type = bin_int_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
/* shifts: result type depends on (value, shift-amount) pair */
777 ins->type = shift_table [src1->type] [src2->type];
778 ins->opcode += binops_op_map [ins->type];
/* compare: choose L/F/I variant from the first operand's width */
783 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
784 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
785 ins->opcode = OP_LCOMPARE;
786 else if (src1->type == STACK_R8)
787 ins->opcode = OP_FCOMPARE;
789 ins->opcode = OP_ICOMPARE;
791 case OP_ICOMPARE_IMM:
/* note: table indexed [src1][src1] — immediate has src1's type */
792 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
793 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
794 ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches (beq..blt.un) */
806 ins->opcode += beqops_op_map [src1->type];
/* ceq: validity from bin_comp_table */
809 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
810 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only table entries with bit 0 set are valid */
816 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
817 ins->opcode += ceqops_op_map [src1->type];
/* neg: result type from neg_table */
821 ins->type = neg_table [src1->type];
822 ins->opcode += unops_op_map [ins->type];
/* not: integer-ish operands only */
825 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
826 ins->type = src1->type;
828 ins->type = STACK_INV;
829 ins->opcode += unops_op_map [ins->type];
/* conv to small int: result is I4 */
835 ins->type = STACK_I4;
836 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> double */
839 ins->type = STACK_R8;
840 switch (src1->type) {
843 ins->opcode = OP_ICONV_TO_R_UN;
846 ins->opcode = OP_LCONV_TO_R_UN;
/* overflow-checked conversions to 32-bit results */
850 case CEE_CONV_OVF_I1:
851 case CEE_CONV_OVF_U1:
852 case CEE_CONV_OVF_I2:
853 case CEE_CONV_OVF_U2:
854 case CEE_CONV_OVF_I4:
855 case CEE_CONV_OVF_U4:
856 ins->type = STACK_I4;
857 ins->opcode += ovf3ops_op_map [src1->type];
/* overflow-checked conversions to native int from unsigned */
859 case CEE_CONV_OVF_I_UN:
860 case CEE_CONV_OVF_U_UN:
861 ins->type = STACK_PTR;
862 ins->opcode += ovf2ops_op_map [src1->type];
864 case CEE_CONV_OVF_I1_UN:
865 case CEE_CONV_OVF_I2_UN:
866 case CEE_CONV_OVF_I4_UN:
867 case CEE_CONV_OVF_U1_UN:
868 case CEE_CONV_OVF_U2_UN:
869 case CEE_CONV_OVF_U4_UN:
870 ins->type = STACK_I4;
871 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int, opcode depends on source width */
874 ins->type = STACK_PTR;
875 switch (src1->type) {
877 ins->opcode = OP_ICONV_TO_U;
/* pointer-sized source: truncate on 32-bit, LCONV on 64-bit */
881 #if SIZEOF_REGISTER == 8
882 ins->opcode = OP_LCONV_TO_U;
884 ins->opcode = OP_MOVE;
888 ins->opcode = OP_LCONV_TO_U;
891 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit */
897 ins->type = STACK_I8;
898 ins->opcode += unops_op_map [src1->type];
900 case CEE_CONV_OVF_I8:
901 case CEE_CONV_OVF_U8:
902 ins->type = STACK_I8;
903 ins->opcode += ovf3ops_op_map [src1->type];
905 case CEE_CONV_OVF_U8_UN:
906 case CEE_CONV_OVF_I8_UN:
907 ins->type = STACK_I8;
908 ins->opcode += ovf2ops_op_map [src1->type];
/* conv to floating point */
912 ins->type = STACK_R8;
913 ins->opcode += unops_op_map [src1->type];
916 ins->type = STACK_R8;
/* mul.ovf etc. with 32-bit result */
920 ins->type = STACK_I4;
921 ins->opcode += ovfops_op_map [src1->type];
/* conv.i/conv.u family with native-int result */
926 ins->type = STACK_PTR;
927 ins->opcode += ovfops_op_map [src1->type];
/* add.ovf/sub.ovf/mul.ovf: no overflow checking defined for floats */
935 ins->type = bin_num_table [src1->type] [src2->type];
936 ins->opcode += ovfops_op_map [src1->type];
937 if (ins->type == STACK_R8)
938 ins->type = STACK_INV;
/* load opcodes produce their natural stack type directly */
940 case OP_LOAD_MEMBASE:
941 ins->type = STACK_PTR;
943 case OP_LOADI1_MEMBASE:
944 case OP_LOADU1_MEMBASE:
945 case OP_LOADI2_MEMBASE:
946 case OP_LOADU2_MEMBASE:
947 case OP_LOADI4_MEMBASE:
948 case OP_LOADU4_MEMBASE:
949 ins->type = STACK_PTR;
951 case OP_LOADI8_MEMBASE:
952 ins->type = STACK_I8;
954 case OP_LOADR4_MEMBASE:
955 case OP_LOADR8_MEMBASE:
956 ins->type = STACK_R8;
959 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers get a conservative object klass */
963 if (ins->type == STACK_MP)
964 ins->klass = mono_defaults.object_class;
969 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
975 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Debug/verification helper — checks that the eval-stack types of 'args'
 * are compatible with the parameter types of 'sig' (byref-ness, reference
 * kinds, R4/R8 for floats). NOTE(review): the return statements and the
 * 'this' check body are outside this excerpt; behavior inferred partially.
 */
980 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
984 switch (args->type) {
994 for (i = 0; i < sig->param_count; ++i) {
995 switch (args [i].type) {
/* a STACK_MP arg requires a byref parameter */
999 if (!sig->params [i]->byref)
1003 if (sig->params [i]->byref)
1005 switch (sig->params [i]->type) {
1006 case MONO_TYPE_CLASS:
1007 case MONO_TYPE_STRING:
1008 case MONO_TYPE_OBJECT:
1009 case MONO_TYPE_SZARRAY:
1010 case MONO_TYPE_ARRAY:
1017 if (sig->params [i]->byref)
/* STACK_R8 args must map to a floating-point parameter */
1019 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1028 /*if (!param_table [args [i].type] [sig->params [i]->type])
1036 * When we need a pointer to the current domain many times in a method, we
1037 * call mono_domain_get() once and we store the result in a local variable.
1038 * This function returns the variable that represents the MonoDomain*.
/*
 * mono_get_domainvar:
 * Returns the local variable caching the MonoDomain* for this method,
 * creating it lazily — so mono_domain_get() is called once per method
 * instead of at every use site.
 */
1040 inline static MonoInst *
1041 mono_get_domainvar (MonoCompile *cfg)
1043 if (!cfg->domainvar)
1044 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1045 return cfg->domainvar;
1049 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 * Returns (lazily creating) the variable holding the Global Offset Table
 * address. Only meaningful on architectures that define
 * MONO_ARCH_NEED_GOT_VAR and when compiling AOT; otherwise the early
 * return (line elided here) presumably yields NULL — confirm in full source.
 */
1053 mono_get_got_var (MonoCompile *cfg)
1055 #ifdef MONO_ARCH_NEED_GOT_VAR
1056 if (!cfg->compile_aot)
1058 if (!cfg->got_var) {
1059 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1061 return cfg->got_var;
/*
 * mono_get_vtable_var:
 * Returns (lazily creating) the rgctx variable used under generic sharing
 * to reach the runtime generic context / vtable. Asserts that sharing is
 * active; the variable is forced onto the stack via MONO_INST_INDIRECT.
 */
1068 mono_get_vtable_var (MonoCompile *cfg)
1070 g_assert (cfg->generic_sharing_context);
1072 if (!cfg->rgctx_var) {
1073 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 /* force the var to be stack allocated */
1075 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1078 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Inverse of type_to_eval_stack_type: maps an instruction's eval-stack
 * type back to a representative MonoType* (used e.g. when creating spill
 * locals). STACK_MP uses the klass's this_arg (a byref), STACK_VTYPE the
 * klass's byval_arg.
 */
1082 type_from_stack_type (MonoInst *ins) {
1083 switch (ins->type) {
1084 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1085 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1086 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1087 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP (case label elided in this excerpt): managed pointer */
1089 return &ins->klass->this_arg;
1090 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1091 case STACK_VTYPE: return &ins->klass->byval_arg;
1093 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Maps a MonoType to its eval-stack type constant (STACK_*), after
 * stripping enum wrappers via mono_type_get_underlying_type. Valuetype
 * generic instances are STACK_VTYPE. NOTE(review): the per-case return
 * statements are elided from this excerpt.
 */
1098 static G_GNUC_UNUSED int
1099 type_to_stack_type (MonoType *t)
1101 t = mono_type_get_underlying_type (t);
1105 case MONO_TYPE_BOOLEAN:
1108 case MONO_TYPE_CHAR:
1115 case MONO_TYPE_FNPTR:
1117 case MONO_TYPE_CLASS:
1118 case MONO_TYPE_STRING:
1119 case MONO_TYPE_OBJECT:
1120 case MONO_TYPE_SZARRAY:
1121 case MONO_TYPE_ARRAY:
1129 case MONO_TYPE_VALUETYPE:
1130 case MONO_TYPE_TYPEDBYREF:
1132 case MONO_TYPE_GENERICINST:
1133 if (mono_type_generic_inst_is_valuetype (t))
1139 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Maps a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the element
 * type it accesses. NOTE(review): most case labels are elided in this
 * excerpt; only the returns and the LDELEM_REF/STELEM_REF pair are visible.
 */
1146 array_access_to_klass (int opcode)
1150 return mono_defaults.byte_class;
1152 return mono_defaults.uint16_class;
1155 return mono_defaults.int_class;
1158 return mono_defaults.sbyte_class;
1161 return mono_defaults.int16_class;
1164 return mono_defaults.int32_class;
1166 return mono_defaults.uint32_class;
1169 return mono_defaults.int64_class;
1172 return mono_defaults.single_class;
1175 return mono_defaults.double_class;
1176 case CEE_LDELEM_REF:
1177 case CEE_STELEM_REF:
1178 return mono_defaults.object_class;
1180 g_assert_not_reached ();
1186 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Returns a local variable for eval-stack slot 'slot' with ins's stack
 * type, sharing previously-created variables via cfg->intvars (keyed by
 * type-1 + slot*STACK_MAX). Slots beyond max_stack (possible with
 * inlining) and non-shareable types always get a fresh variable.
 */
1189 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1194 /* inlining can result in deeper stacks */
1195 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1196 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1198 pos = ins->type - 1 + slot * STACK_MAX;
1200 switch (ins->type) {
/* shareable stack types: reuse the cached var if one exists */
1207 if ((vnum = cfg->intvars [pos]))
1208 return cfg->varinfo [vnum];
1209 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 cfg->intvars [pos] = res->inst_c0;
/* fallback: non-shareable type, fresh variable */
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When AOT-compiling without a generic context, records (image, token)
 * in cfg->token_info_hash under 'key' so the AOT compiler can later
 * resolve the item from metadata alone.
 */
1219 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1222 * Don't use this if a generic_context is set, since that means AOT can't
1223 * look up the method using just the image+token.
1224 * table == 0 means this is a reference made from a wrapper.
1226 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1227 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1228 jump_info_token->image = image;
1229 jump_info_token->token = token;
1230 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1235 * This function is called to handle items that are left on the evaluation stack
1236 * at basic block boundaries. What happens is that we save the values to local variables
1237 * and we reload them later when first entering the target basic block (with the
1238 * handle_loaded_temps () function).
1239 * A single joint point will use the same variables (stored in the array bb->out_stack or
1240 * bb->in_stack, if the basic block is before or after the joint point).
1242 * This function needs to be called _before_ emitting the last instruction of
1243 * the bb (i.e. before emitting a branch).
1244 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spills the 'count' values left on the eval stack ('sp') at the end of
 * the current bblock into locals, so successor bblocks can reload them.
 * Successors share one set of locals (bb->out_stack / outb->in_stack);
 * a mismatch in depth at a join point marks cfg->unverifiable. Must run
 * before the bblock's final branch is emitted. When different successors
 * already have differing in_stacks, the values are stored to each distinct
 * in_stack in turn.
 * NOTE(review): excerpt is non-contiguous — continue/break statements and
 * some loop scaffolding are missing from view.
 */
1247 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1250 MonoBasicBlock *bb = cfg->cbb;
1251 MonoBasicBlock *outb;
1252 MonoInst *inst, **locals;
1257 if (cfg->verbose_level > 3)
1258 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first time out of this bblock: pick or allocate the spill locals */
1259 if (!bb->out_scount) {
1260 bb->out_scount = count;
1261 //printf ("bblock %d has out:", bb->block_num);
1263 for (i = 0; i < bb->out_count; ++i) {
1264 outb = bb->out_bb [i];
1265 /* exception handlers are linked, but they should not be considered for stack args */
1266 if (outb->flags & BB_EXCEPTION_HANDLER)
1268 //printf (" %d", outb->block_num);
/* reuse a successor's in_stack if it already has one */
1269 if (outb->in_stack) {
1271 bb->out_stack = outb->in_stack;
1277 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1278 for (i = 0; i < count; ++i) {
1280 * try to reuse temps already allocated for this purpouse, if they occupy the same
1281 * stack slot and if they are of the same type.
1282 * This won't cause conflicts since if 'local' is used to
1283 * store one of the values in the in_stack of a bblock, then
1284 * the same variable will be used for the same outgoing stack
1286 * This doesn't work when inlining methods, since the bblocks
1287 * in the inlined methods do not inherit their in_stack from
1288 * the bblock they are inlined to. See bug #58863 for an
1291 if (cfg->inlined_method)
1292 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1294 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate the chosen locals to every successor's in_stack */
1299 for (i = 0; i < bb->out_count; ++i) {
1300 outb = bb->out_bb [i];
1301 /* exception handlers are linked, but they should not be considered for stack args */
1302 if (outb->flags & BB_EXCEPTION_HANDLER)
1304 if (outb->in_scount) {
/* join point with mismatched stack depth: invalid IL */
1305 if (outb->in_scount != bb->out_scount) {
1306 cfg->unverifiable = TRUE;
1309 continue; /* check they are the same locals */
1311 outb->in_scount = count;
1312 outb->in_stack = bb->out_stack;
/* emit the actual stores, replacing sp entries with the locals */
1315 locals = bb->out_stack;
1317 for (i = 0; i < count; ++i) {
1318 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1319 inst->cil_code = sp [i]->cil_code;
1320 sp [i] = locals [i];
1321 if (cfg->verbose_level > 3)
1322 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1326 * It is possible that the out bblocks already have in_stack assigned, and
1327 * the in_stacks differ. In this case, we will store to all the different
1334 /* Find a bblock which has a different in_stack */
1336 while (bindex < bb->out_count) {
1337 outb = bb->out_bb [bindex];
1338 /* exception handlers are linked, but they should not be considered for stack args */
1339 if (outb->flags & BB_EXCEPTION_HANDLER) {
1343 if (outb->in_stack != locals) {
1344 for (i = 0; i < count; ++i) {
1345 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1346 inst->cil_code = sp [i]->cil_code;
1347 sp [i] = locals [i];
1348 if (cfg->verbose_level > 3)
1349 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1351 locals = outb->in_stack;
1360 /* Emit code which loads interface_offsets [klass->interface_id]
1361 * The array is stored in memory before vtable.
/*
 * Loads the interface offset slot for @klass into @intf_reg.  Under AOT the
 * interface id is not known at JIT time, so an ADJUSTED_IID patch constant is
 * added to @vtable_reg and dereferenced; otherwise a negative immediate
 * offset from the vtable is loaded directly.
 * NOTE(review): this excerpt has elided lines (missing braces/else); code
 * left byte-identical.
 */
1364 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1366 if (cfg->compile_aot) {
1367 int ioffset_reg = alloc_preg (cfg);
1368 int iid_reg = alloc_preg (cfg);
1370 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1380 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1381 * stored in "klass_reg" implements the interface "klass".
/*
 * Tests the class' interface bitmap: loads the byte containing bit
 * (interface_id) and masks it.  In the AOT path the bit position is computed
 * at runtime from an IID patch constant; otherwise both the byte offset
 * (iid >> 3) and the mask (1 << (iid & 7)) are immediates.
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
1384 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1386 int ibitmap_reg = alloc_preg (cfg);
1387 int ibitmap_byte_reg = alloc_preg (cfg);
1389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1391 if (cfg->compile_aot) {
1392 int iid_reg = alloc_preg (cfg);
1393 int shifted_iid_reg = alloc_preg (cfg);
1394 int ibitmap_byte_address_reg = alloc_preg (cfg);
1395 int masked_iid_reg = alloc_preg (cfg);
1396 int iid_one_bit_reg = alloc_preg (cfg);
1397 int iid_bit_reg = alloc_preg (cfg);
1398 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1400 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1401 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1403 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1405 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1413 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1414 * stored in "vtable_reg" implements the interface "klass".
/*
 * Same bitmap test as mini_emit_load_intf_bit_reg_class, but the interface
 * bitmap is read from the vtable instead of the class.
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
1417 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1419 int ibitmap_reg = alloc_preg (cfg);
1420 int ibitmap_byte_reg = alloc_preg (cfg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1424 if (cfg->compile_aot) {
1425 int iid_reg = alloc_preg (cfg);
1426 int shifted_iid_reg = alloc_preg (cfg);
1427 int ibitmap_byte_address_reg = alloc_preg (cfg);
1428 int masked_iid_reg = alloc_preg (cfg);
1429 int iid_one_bit_reg = alloc_preg (cfg);
1430 int iid_bit_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1436 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1440 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1446 * Emit code which checks whether the interface id of @klass is smaller
1447 * than the value given by max_iid_reg.
/*
 * Compares max_iid_reg against klass->interface_id (via an IID patch under
 * AOT, an immediate otherwise) and, on an unsigned less-than, either
 * branches to @false_target or raises InvalidCastException.
 * NOTE(review): excerpt has elided lines (missing braces/else); code left
 * byte-identical.
 */
1450 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1451 MonoBasicBlock *false_target)
1453 if (cfg->compile_aot) {
1454 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1463 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1466 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id and delegates to mini_emit_max_iid_check. */
1468 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1469 MonoBasicBlock *false_target)
1471 int max_iid_reg = alloc_preg (cfg);
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1474 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1477 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id and delegates to mini_emit_max_iid_check. */
1479 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1480 MonoBasicBlock *false_target)
1482 int max_iid_reg = alloc_preg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1485 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an "is instance" test via the supertypes table: optionally checks the
 * inheritance depth (only when klass->idepth exceeds the statically built
 * supertable), loads supertypes [idepth - 1] and compares it against either
 * @klass_ins->dreg, an AOT class constant, or @klass itself.  Branches to
 * @true_target on equality; depth failures go to @false_target.
 * NOTE(review): excerpt has elided lines (missing braces/else); code left
 * byte-identical.
 */
1489 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1491 int idepth_reg = alloc_preg (cfg);
1492 int stypes_reg = alloc_preg (cfg);
1493 int stype = alloc_preg (cfg);
1495 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1501 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1504 } else if (cfg->compile_aot) {
1505 int const_reg = alloc_preg (cfg);
1506 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1509 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit klass instruction. */
1515 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1517 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Interface cast via a vtable register: checks max interface id, loads the
 * interface bitmap bit, then either branches to @true_target when the bit is
 * set or throws InvalidCastException when it is zero.
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
1521 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1523 int intf_reg = alloc_preg (cfg);
1525 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1526 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1531 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1535 * Variant of the above that takes a register to the class, not the vtable.
/* NOTE(review): excerpt has elided lines; code left byte-identical. */
1538 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1540 int intf_bit_reg = alloc_preg (cfg);
1542 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1543 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact class equality check: compares @klass_reg against
 * @klass_inst->dreg, an AOT class constant, or @klass as an immediate,
 * throwing InvalidCastException on mismatch.
 * NOTE(review): excerpt has elided lines (the first branch's condition on
 * klass_inst is not visible); code left byte-identical.
 */
1552 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1556 } else if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without a klass instruction. */
1567 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1569 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares @klass_reg against @klass (AOT constant or immediate) and emits a
 * conditional branch with @branch_op to @target.
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
1573 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1575 if (cfg->compile_aot) {
1576 int const_reg = alloc_preg (cfg);
1577 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1586 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a castclass check.  The visible portion handles the array case
 * (rank comparison, element-class check — with special handling for casts to
 * object[]/Enum[]/enum element classes and interface element classes, and a
 * vector/bounds check for SZARRAY) and the non-array case (supertypes-table
 * check, like mini_emit_isninst_cast_inst but throwing on failure).
 * Pass obj_reg == -1 to skip the vector check (used for arrays of arrays).
 * NOTE(review): excerpt has elided lines (outer if/else structure is not
 * fully visible); code left byte-identical.
 */
1589 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1592 int rank_reg = alloc_preg (cfg);
1593 int eclass_reg = alloc_preg (cfg);
1595 g_assert (!klass_inst);
1596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1598 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1599 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1601 if (klass->cast_class == mono_defaults.object_class) {
1602 int parent_reg = alloc_preg (cfg);
1603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1604 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1607 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1608 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1609 } else if (klass->cast_class == mono_defaults.enum_class) {
1610 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1611 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1612 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1614 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1615 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1618 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1619 /* Check that the object is a vector too */
1620 int bounds_reg = alloc_preg (cfg);
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1622 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1623 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1626 int idepth_reg = alloc_preg (cfg);
1627 int stypes_reg = alloc_preg (cfg);
1628 int stype = alloc_preg (cfg);
1630 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1631 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1633 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1637 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a klass instruction. */
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1644 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline code to set @size bytes at @destreg + @offset to @val
 * (asserted to be 0 in the visible path).  Small aligned sizes use a single
 * immediate store; larger sizes store a value register in register-sized
 * chunks, then 4/2/1-byte tails, guarded by NO_UNALIGNED_ACCESS.
 * NOTE(review): excerpt has elided lines (switch/loop structure not fully
 * visible); code left byte-identical.
 */
1648 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1652 g_assert (val == 0);
1657 if ((size <= 4) && (size <= align)) {
1660 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1663 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1666 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1668 #if SIZEOF_REGISTER == 8
1670 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1676 val_reg = alloc_preg (cfg);
1678 if (SIZEOF_REGISTER == 8)
1679 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1681 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1684 /* This could be optimized further if necessary */
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1693 #if !NO_UNALIGNED_ACCESS
1694 if (SIZEOF_REGISTER == 8) {
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1725 #endif /* DISABLE_JIT */
/*
 * Emits inline code to copy @size bytes from @srcreg + @soffset to
 * @destreg + @doffset: register-sized (8-byte) chunks when unaligned access
 * is allowed on 64-bit, then 4/2/1-byte tails.  Size is asserted < 10000 to
 * bound code expansion.
 * NOTE(review): excerpt has elided lines (loop structure and offset
 * increments not fully visible); code left byte-identical.
 */
1728 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1735 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1736 g_assert (size < 10000);
1739 /* This could be optimized further if necessary */
1741 cur_reg = alloc_preg (cfg);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1750 #if !NO_UNALIGNED_ACCESS
1751 if (SIZEOF_REGISTER == 8) {
1753 cur_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1764 cur_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1780 cur_reg = alloc_preg (cfg);
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a return type to the appropriate call opcode family
 * (CALL/VOIDCALL/LCALL/FCALL/VCALL), selecting the _REG variant for calli
 * and the VIRT variant for virtual calls.  Enums and generic instances are
 * reduced to their underlying type before dispatch.
 * NOTE(review): excerpt has elided lines (several case labels and a goto for
 * the enum path are not visible); code left byte-identical.
 */
1792 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1798 type = mini_get_basic_type_from_generic (gsctx, type);
1799 switch (type->type) {
1800 case MONO_TYPE_VOID:
1801 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1804 case MONO_TYPE_BOOLEAN:
1807 case MONO_TYPE_CHAR:
1810 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1814 case MONO_TYPE_FNPTR:
1815 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1816 case MONO_TYPE_CLASS:
1817 case MONO_TYPE_STRING:
1818 case MONO_TYPE_OBJECT:
1819 case MONO_TYPE_SZARRAY:
1820 case MONO_TYPE_ARRAY:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1824 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1827 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1828 case MONO_TYPE_VALUETYPE:
1829 if (type->data.klass->enumtype) {
1830 type = mono_class_enum_basetype (type->data.klass);
1833 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1834 case MONO_TYPE_TYPEDBYREF:
1835 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1836 case MONO_TYPE_GENERICINST:
1837 type = &type->data.generic_class->container_class->byval_arg;
1840 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1846 * target_type_is_incompatible:
1847 * @cfg: MonoCompile context
1849 * Check that the item @arg on the evaluation stack can be stored
1850 * in the target type (can be a local, or field, etc).
1851 * The cfg arg can be used to check if we need verification or just
1854 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): excerpt has elided lines (return statements between cases
 * are not visible); code left byte-identical. */
1857 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1859 MonoType *simple_type;
1862 if (target->byref) {
1863 /* FIXME: check that the pointed to types match */
1864 if (arg->type == STACK_MP)
1865 return arg->klass != mono_class_from_mono_type (target);
1866 if (arg->type == STACK_PTR)
1871 simple_type = mono_type_get_underlying_type (target);
1872 switch (simple_type->type) {
1873 case MONO_TYPE_VOID:
1877 case MONO_TYPE_BOOLEAN:
1880 case MONO_TYPE_CHAR:
1883 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1887 /* STACK_MP is needed when setting pinned locals */
1888 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1893 case MONO_TYPE_FNPTR:
1894 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1897 case MONO_TYPE_CLASS:
1898 case MONO_TYPE_STRING:
1899 case MONO_TYPE_OBJECT:
1900 case MONO_TYPE_SZARRAY:
1901 case MONO_TYPE_ARRAY:
1902 if (arg->type != STACK_OBJ)
1904 /* FIXME: check type compatibility */
1908 if (arg->type != STACK_I8)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 if (arg->type != STACK_VTYPE)
1934 klass = mono_class_from_mono_type (simple_type);
1935 if (klass != arg->klass)
1939 if (arg->type != STACK_OBJ)
1941 /* FIXME: check type compatibility */
1945 case MONO_TYPE_MVAR:
1946 /* FIXME: all the arguments must be references for now,
1947 * later look inside cfg and see if the arg num is
1948 * really a reference
1950 g_assert (cfg->generic_sharing_context);
1951 if (arg->type != STACK_OBJ)
1955 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1961 * Prepare arguments for passing to a function call.
1962 * Return a non-zero value if the arguments can't be passed to the given
1964 * The type checks are not yet complete and some conversions may need
1965 * casts on 32 or 64 bit architectures.
1967 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): excerpt has elided lines (return statements between cases
 * are not visible); code left byte-identical. */
1970 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1972 MonoType *simple_type;
1976 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1980 for (i = 0; i < sig->param_count; ++i) {
1981 if (sig->params [i]->byref) {
1982 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1986 simple_type = sig->params [i];
1987 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1989 switch (simple_type->type) {
1990 case MONO_TYPE_VOID:
1995 case MONO_TYPE_BOOLEAN:
1998 case MONO_TYPE_CHAR:
2001 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2007 case MONO_TYPE_FNPTR:
2008 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2011 case MONO_TYPE_CLASS:
2012 case MONO_TYPE_STRING:
2013 case MONO_TYPE_OBJECT:
2014 case MONO_TYPE_SZARRAY:
2015 case MONO_TYPE_ARRAY:
2016 if (args [i]->type != STACK_OBJ)
2021 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != STACK_R8)
2029 case MONO_TYPE_VALUETYPE:
2030 if (simple_type->data.klass->enumtype) {
2031 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2034 if (args [i]->type != STACK_VTYPE)
2037 case MONO_TYPE_TYPEDBYREF:
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_GENERICINST:
2042 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2046 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Maps a CALLVIRT-family opcode to its direct-call equivalent.
 * NOTE(review): excerpt has elided lines (most case/return pairs are not
 * visible); code left byte-identical.
 */
2054 callvirt_to_call (int opcode)
2059 case OP_VOIDCALLVIRT:
2068 g_assert_not_reached ();
/*
 * Maps a CALLVIRT-family opcode to its CALL_MEMBASE equivalent, used when
 * the callee address is loaded from a vtable/IMT slot.
 * NOTE(review): excerpt has elided lines (case labels are not fully
 * visible); code left byte-identical.
 */
2075 callvirt_to_call_membase (int opcode)
2079 return OP_CALL_MEMBASE;
2080 case OP_VOIDCALLVIRT:
2081 return OP_VOIDCALL_MEMBASE;
2083 return OP_FCALL_MEMBASE;
2085 return OP_LCALL_MEMBASE;
2087 return OP_VCALL_MEMBASE;
2089 g_assert_not_reached ();
2095 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Passes the IMT "method" argument for an interface call: either @imt_arg's
 * value, an AOT method constant, or a direct method pointer, placed in the
 * architecture's dedicated IMT register when MONO_ARCH_IMT_REG is defined;
 * otherwise delegated to mono_arch_emit_imt_argument.
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
2097 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2099 #ifdef MONO_ARCH_IMT_REG
2100 int method_reg = alloc_preg (cfg);
2103 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2104 } else if (cfg->compile_aot) {
2105 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2108 MONO_INST_NEW (cfg, ins, OP_PCONST);
2109 ins->inst_p0 = call->method;
2110 ins->dreg = method_reg;
2111 MONO_ADD_INS (cfg->cbb, ins);
2114 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2116 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2121 static MonoJumpInfo *
/*
 * Allocates a MonoJumpInfo from @mp and fills in the target; the visible
 * code stores @target — ip/type assignments appear elided in this excerpt.
 */
2122 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2124 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2128 ji->data.target = target;
2133 inline static MonoCallInst *
/*
 * Creates a MonoCallInst for a call with signature @sig and arguments @args:
 * picks the call opcode (OP_TAILCALL for tail calls, otherwise via
 * ret_type_to_call_opcode), sets up the vtype return (either cfg->vret_addr
 * or a temp addressed through OP_OUTARG_VTRETADDR), converts R4 arguments to
 * int vregs under soft float, and lets the backend (LLVM or arch) emit the
 * outgoing argument moves.  Updates param_area and MONO_CFG_HAS_CALLS.
 * NOTE(review): excerpt has elided lines (declarations, #ifdef pairing, and
 * some braces are not visible); code left byte-identical.
 */
2134 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2135 MonoInst **args, int calli, int virtual, int tail)
2138 #ifdef MONO_ARCH_SOFT_FLOAT
2143 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2145 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2148 call->signature = sig;
2150 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2153 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2154 call->vret_var = cfg->vret_addr;
2155 //g_assert_not_reached ();
2157 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2158 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2161 temp->backend.is_pinvoke = sig->pinvoke;
2164 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2165 * address of return value to increase optimization opportunities.
2166 * Before vtype decomposition, the dreg of the call ins itself represents the
2167 * fact the call modifies the return value. After decomposition, the call will
2168 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2169 * will be transformed into an LDADDR.
2171 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2172 loada->dreg = alloc_preg (cfg);
2173 loada->inst_p0 = temp;
2174 /* We reference the call too since call->dreg could change during optimization */
2175 loada->inst_p1 = call;
2176 MONO_ADD_INS (cfg->cbb, loada);
2178 call->inst.dreg = temp->dreg;
2180 call->vret_var = loada;
2181 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2182 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2184 #ifdef MONO_ARCH_SOFT_FLOAT
2185 if (COMPILE_SOFT_FLOAT (cfg)) {
2187 * If the call has a float argument, we would need to do an r8->r4 conversion using
2188 * an icall, but that cannot be done during the call sequence since it would clobber
2189 * the call registers + the stack. So we do it before emitting the call.
2191 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2193 MonoInst *in = call->args [i];
2195 if (i >= sig->hasthis)
2196 t = sig->params [i - sig->hasthis];
2198 t = &mono_defaults.int_class->byval_arg;
2199 t = mono_type_get_underlying_type (t);
2201 if (!t->byref && t->type == MONO_TYPE_R4) {
2202 MonoInst *iargs [1];
2206 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2208 /* The result will be in an int vreg */
2209 call->args [i] = conv;
2216 if (COMPILE_LLVM (cfg))
2217 mono_llvm_emit_call (cfg, call);
2219 mono_arch_emit_call (cfg, call);
2221 mono_arch_emit_call (cfg, call);
2224 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2225 cfg->flags |= MONO_CFG_HAS_CALLS;
2230 inline static MonoInst*
/*
 * Emits an indirect call through @addr: builds the call args (calli = TRUE),
 * sets the address as sreg1, and appends the call to the current bblock.
 */
2231 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2233 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2235 call->inst.sreg1 = addr->dreg;
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
2242 inline static MonoInst*
/*
 * Indirect call that also passes an RGCTX argument in the architecture's
 * dedicated RGCTX register (only supported when MONO_ARCH_RGCTX_REG is
 * defined; asserts otherwise).
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
2243 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2245 #ifdef MONO_ARCH_RGCTX_REG
2250 rgctx_reg = mono_alloc_preg (cfg);
2251 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2253 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2255 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2256 cfg->uses_rgctx_reg = TRUE;
2257 call->rgctx_reg = TRUE;
2259 return (MonoInst*)call;
2261 g_assert_not_reached ();
2267 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2269 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Central helper for emitting a managed method call.  Visible logic:
 * - string ctors get a patched signature returning string;
 * - possibly-remote calls on shared generic code go through an RGCTX-
 *   resolved remoting wrapper via calli;
 * - multicast-delegate Invoke becomes a membase call through
 *   delegate->invoke_impl;
 * - non-virtual or sealed/final targets are devirtualized into direct calls
 *   (with a this-check, and a remoting wrapper for MarshalByRef targets);
 * - otherwise a virtual membase call through the vtable, using the IMT slot
 *   for interface methods when MONO_ARCH_HAVE_IMT is available.
 * NOTE(review): excerpt has elided lines throughout (declarations, braces,
 * some conditions); code left byte-identical.
 */
2272 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2273 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2275 gboolean might_be_remote;
2276 gboolean virtual = this != NULL;
2277 gboolean enable_for_aot = TRUE;
2281 if (method->string_ctor) {
2282 /* Create the real signature */
2283 /* FIXME: Cache these */
2284 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2285 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2290 might_be_remote = this && sig->hasthis &&
2291 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2292 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2294 context_used = mono_method_check_context_used (method);
2295 if (might_be_remote && context_used) {
2298 g_assert (cfg->generic_sharing_context);
2300 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2302 return mono_emit_calli (cfg, sig, args, addr);
2305 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2307 if (might_be_remote)
2308 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2310 call->method = method;
2311 call->inst.flags |= MONO_INST_HAS_METHOD;
2312 call->inst.inst_left = this;
2315 int vtable_reg, slot_reg, this_reg;
2317 this_reg = this->dreg;
2319 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2320 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2321 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2323 /* Make a call to delegate->invoke_impl */
2324 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2325 call->inst.inst_basereg = this_reg;
2326 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2327 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2329 return (MonoInst*)call;
2333 if ((!cfg->compile_aot || enable_for_aot) &&
2334 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2335 (MONO_METHOD_IS_FINAL (method) &&
2336 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2337 !(method->klass->marshalbyref && context_used)) {
2339 * the method is not virtual, we just need to ensure this is not null
2340 * and then we can call the method directly.
2342 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2344 * The check above ensures method is not gshared, this is needed since
2345 * gshared methods can't have wrappers.
2347 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2350 if (!method->string_ctor)
2351 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2353 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2355 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2357 return (MonoInst*)call;
2360 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2362 * the method is virtual, but we can statically dispatch since either
2363 * it's class or the method itself are sealed.
2364 * But first we need to ensure it's not a null reference.
2366 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2368 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2369 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2371 return (MonoInst*)call;
2374 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2376 vtable_reg = alloc_preg (cfg);
2377 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2378 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2380 #ifdef MONO_ARCH_HAVE_IMT
2382 guint32 imt_slot = mono_method_get_imt_slot (method);
2383 emit_imt_argument (cfg, call, imt_arg);
2384 slot_reg = vtable_reg;
2385 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2388 if (slot_reg == -1) {
2389 slot_reg = alloc_preg (cfg);
2390 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2391 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2394 slot_reg = vtable_reg;
2395 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2396 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2397 #ifdef MONO_ARCH_HAVE_IMT
2399 g_assert (mono_method_signature (method)->generic_param_count);
2400 emit_imt_argument (cfg, call, imt_arg);
2405 call->inst.sreg1 = slot_reg;
2406 call->virtual = TRUE;
2409 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2411 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg in
 * the architecture's RGCTX register (requires MONO_ARCH_RGCTX_REG).
 * NOTE(review): excerpt has elided lines; code left byte-identical.
 */
2415 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2416 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2418 #ifdef MONO_ARCH_RGCTX_REG
2425 #ifdef MONO_ARCH_RGCTX_REG
2426 rgctx_reg = mono_alloc_preg (cfg);
2427 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2432 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2434 call = (MonoCallInst*)ins;
2436 #ifdef MONO_ARCH_RGCTX_REG
2437 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2438 cfg->uses_rgctx_reg = TRUE;
2439 call->rgctx_reg = TRUE;
/* Convenience wrapper: method call using the method's own signature, no IMT arg. */
2449 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2451 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emits a direct call to the native function @func with signature @sig.
 * NOTE(review): excerpt has elided lines (the fptr assignment is not
 * visible); code left byte-identical.
 */
2455 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2462 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2465 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2467 return (MonoInst*)call;
/*
 * Emits a call to the JIT icall registered for address @func, going through
 * the icall wrapper looked up via mono_find_jit_icall_by_addr.
 */
2471 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2473 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2477 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2481 * mono_emit_abs_call:
2483 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2485 inline static MonoInst*
/*
 * Registers a MonoJumpInfo in cfg->abs_patches and passes it as the "call
 * address" so the PATCH_INFO_ABS resolving code can map it back to the real
 * patch info; fptr_is_patch marks the call accordingly.
 */
2486 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2487 MonoMethodSignature *sig, MonoInst **args)
2489 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2493 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2496 if (cfg->abs_patches == NULL)
2497 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2498 g_hash_table_insert (cfg->abs_patches, ji, ji);
2499 ins = mono_emit_native_call (cfg, ji, sig, args);
2500 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   After a call whose return type is a small integer, emit an explicit
 * sign/zero extension of the result.  Only done for pinvoke (or when LLVM
 * is enabled) non-byref returns, because native code may leave the upper
 * bits of the return register uninitialized.
 */
2505 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2507 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2508 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2512 * Native code might return non register sized integers
2513 * without initializing the upper bits.
/* Map the return type's load opcode to the matching conversion opcode. */
2515 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2516 case OP_LOADI1_MEMBASE:
2517 widen_op = OP_ICONV_TO_I1;
2519 case OP_LOADU1_MEMBASE:
2520 widen_op = OP_ICONV_TO_U1;
2522 case OP_LOADI2_MEMBASE:
2523 widen_op = OP_ICONV_TO_I2;
2525 case OP_LOADU2_MEMBASE:
2526 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening; otherwise emit the
 * conversion into a fresh vreg, preserving the stack type. */
2532 if (widen_op != -1) {
2533 int dreg = alloc_preg (cfg);
2536 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2537 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed corlib helper String.memcpy (3 args), caching the
 * lookup in a function-local static.  Aborts if the method is missing,
 * which indicates an out-of-date corlib.
 */
2547 get_memcpy_method (void)
2549 static MonoMethod *memcpy_method = NULL;
2550 if (!memcpy_method) {
2551 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2553 g_error ("Old corlib found. Install a new one");
2555 return memcpy_method;
2559 * Emit code to copy a valuetype of type @klass whose address is stored in
2560 * @src->dreg to memory whose address is stored at @dest->dreg.
2563 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2565 MonoInst *iargs [3];
2568 MonoMethod *memcpy_method;
2572 * This check breaks with spilled vars... need to handle it during verification anyway.
2573 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Use the native (marshalled) size for pinvoke copies, the managed value
 * size otherwise; both also yield the required alignment. */
2577 n = mono_class_native_size (klass, &align);
2579 n = mono_class_value_size (klass, &align);
2581 #if HAVE_WRITE_BARRIERS
2582 /* if native is true there should be no references in the struct */
2583 if (klass->has_references && !native) {
2584 /* Avoid barriers when storing to the stack */
2585 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2586 (dest->opcode == OP_LDADDR))) {
2587 int context_used = 0;
2592 if (cfg->generic_sharing_context)
2593 context_used = mono_class_check_context_used (klass);
/* iargs [2] receives the class: via rgctx when shared, as a class const
 * under AOT, or as a raw pointer (with a precomputed GC descriptor). */
2595 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2597 if (cfg->compile_aot) {
2598 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2600 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2601 mono_class_compute_gc_descriptor (klass);
2605 /* FIXME: this does the memcpy as well (or
2606 should), so we don't need the memcpy
2608 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies (<= 5 pointers) are inlined when intrinsics are enabled. */
2613 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2614 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2615 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fallback: call the managed memcpy helper with (dest, src, n). */
2619 EMIT_NEW_ICONST (cfg, iargs [2], n);
2621 memcpy_method = get_memcpy_method ();
2622 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed corlib helper String.memset (3 args), caching the
 * lookup in a function-local static.  Aborts on an out-of-date corlib.
 */
2627 get_memset_method (void)
2629 static MonoMethod *memset_method = NULL;
2630 if (!memset_method) {
2631 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2633 g_error ("Old corlib found. Install a new one");
2635 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at the address in
 * DEST->dreg.  Small types (<= 5 pointers) are zeroed with an inline
 * memset; larger ones call the managed memset helper with (dest, 0, n).
 */
2639 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2641 MonoInst *iargs [3];
2644 MonoMethod *memset_method;
2646 /* FIXME: Optimize this for the case when dest is an LDADDR */
2648 mono_class_init (klass);
2649 n = mono_class_value_size (klass, &align);
2651 if (n <= sizeof (gpointer) * 5) {
2652 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2655 memset_method = get_memset_method ();
2657 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2658 EMIT_NEW_ICONST (cfg, iargs [2], n);
2659 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR which loads the runtime generic context for METHOD.  The source
 * depends on the method kind: the method RGCTX variable for methods with a
 * method instantiation, the vtable variable for static/valuetype methods,
 * and otherwise the vtable loaded from the 'this' argument.  Only valid
 * when compiling with generic sharing.
 */
2664 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2666 MonoInst *this = NULL;
2668 g_assert (cfg->generic_sharing_context);
/* Load 'this' (arg 0) only for instance methods on reference types that
 * don't use a method-level context. */
2670 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2671 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2672 !method->klass->valuetype)
2673 EMIT_NEW_ARGLOAD (cfg, this, 0);
2675 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2676 MonoInst *mrgctx_loc, *mrgctx_var;
2679 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context in use: the MRGCTX lives in the vtable variable slot. */
2681 mrgctx_loc = mono_get_vtable_var (cfg);
2682 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2685 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2686 MonoInst *vtable_loc, *vtable_var;
/* No 'this' available: fetch the vtable from its dedicated variable. */
2690 vtable_loc = mono_get_vtable_var (cfg);
2691 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2693 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2694 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an MRGCTX; load its class_vtable field. */
2697 vtable_reg = alloc_preg (cfg);
2698 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2699 vtable_var->type = STACK_PTR;
2705 int vtable_reg, res_reg;
/* Default case: read the vtable pointer out of the 'this' object. */
2707 vtable_reg = alloc_preg (cfg);
2708 res_reg = alloc_preg (cfg);
2709 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool MP) and fill a MonoJumpInfoRgctxEntry describing
 * an rgctx slot fetch: the requesting METHOD, whether the lookup goes
 * through the method RGCTX (IN_MRGCTX), the patch describing the looked-up
 * item (PATCH_TYPE/PATCH_DATA) and the kind of info wanted (INFO_TYPE).
 */
2714 static MonoJumpInfoRgctxEntry *
2715 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2717 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2718 res->method = method;
2719 res->in_mrgctx = in_mrgctx;
/* The embedded patch info is also mempool-allocated; no explicit free. */
2720 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2721 res->data->type = patch_type;
2722 res->data->data.target = patch_data;
2723 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline which resolves ENTRY,
 * passing the rgctx instruction as the single argument.
 */
2728 static inline MonoInst*
2729 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2731 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for KLASS from the runtime
 * generic context of the current method.
 */
2735 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2736 MonoClass *klass, int rgctx_type)
2738 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2739 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2741 return emit_rgctx_fetch (cfg, rgctx, entry);
2745 * emit_get_rgctx_method:
2747 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2748 * normal constants, else emit a load from the rgctx.
2751 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2752 MonoMethod *cmethod, int rgctx_type)
/* Non-shared case: the value is known at compile time, emit a constant. */
2754 if (!context_used) {
2757 switch (rgctx_type) {
2758 case MONO_RGCTX_INFO_METHOD:
2759 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2761 case MONO_RGCTX_INFO_METHOD_RGCTX:
2762 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Any other rgctx_type is unsupported in the constant path. */
2765 g_assert_not_reached ();
/* Shared case: fetch the slot lazily through the rgctx trampoline. */
2768 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2769 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2771 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for FIELD from the runtime
 * generic context of the current method.
 */
2776 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2777 MonoClassField *field, int rgctx_type)
2779 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2780 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2782 return emit_rgctx_fetch (cfg, rgctx, entry);
2786 * On return the caller must check @klass for load errors.
2789 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2791 MonoInst *vtable_arg;
2793 int context_used = 0;
2795 if (cfg->generic_sharing_context)
2796 context_used = mono_class_check_context_used (klass);
/* Obtain the vtable argument either through the rgctx (shared code) or as
 * a compile-time vtable constant. */
2799 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2800 klass, MONO_RGCTX_INFO_VTABLE);
2802 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2806 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Call the generic-class-init trampoline with the vtable as argument. */
2809 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2810 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the architecture's dedicated vtable register. */
2811 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2812 cfg->uses_vtable_reg = TRUE;
2819 * On return the caller must check @array_class for load errors
2822 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2824 int vtable_reg = alloc_preg (cfg);
2825 int context_used = 0;
2827 if (cfg->generic_sharing_context)
2828 context_used = mono_class_check_context_used (array_class);
/* Load the object's vtable; all the comparisons below are against it (or
 * against the class read out of it). */
2830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code cannot compare vtables (they are per-domain), so
 * compare the MonoClass instead. */
2832 if (cfg->opt & MONO_OPT_SHARED) {
2833 int class_reg = alloc_preg (cfg);
2834 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2835 if (cfg->compile_aot) {
2836 int klass_reg = alloc_preg (cfg);
2837 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2838 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: the expected vtable comes from the rgctx at runtime. */
2842 } else if (context_used) {
2843 MonoInst *vtable_ins;
2845 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2846 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2848 if (cfg->compile_aot) {
2852 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2854 vt_reg = alloc_preg (cfg);
2855 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2856 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2859 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch throws ArrayTypeMismatchException. */
2865 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When running with --debug=casts, record the source class (read from the
 * object in OBJ_REG) and the target KLASS into the JIT TLS area, so a
 * failing cast can produce a detailed error message.  No-op otherwise.
 */
2869 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2871 if (mini_get_debug_options ()->better_cast_details) {
2872 int to_klass_reg = alloc_preg (cfg);
2873 int vtable_reg = alloc_preg (cfg);
2874 int klass_reg = alloc_preg (cfg);
2875 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* tls_get is NULL on platforms without the TLS intrinsic. */
2878 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2882 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass, then store both sides of the cast into TLS. */
2883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2884 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2887 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state stored by save_cast_details () once the
 * cast has succeeded.  Only emitted with --debug=casts.
 */
2893 reset_cast_details (MonoCompile *cfg)
2895 /* Reset the variables holding the cast details */
2896 if (mini_get_debug_options ()->better_cast_details) {
2897 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2899 MONO_ADD_INS (cfg->cbb, tls_get);
2900 /* It is enough to reset the from field */
2901 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2906 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2907 * generic code is generated.
2910 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2912 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2915 MonoInst *rgctx, *addr;
2917 /* FIXME: What if the class is shared? We might not
2918 have to get the address of the method from the
/* Shared path: resolve the Unbox method's code address through the rgctx
 * and call it indirectly with the rgctx argument. */
2920 addr = emit_get_rgctx_method (cfg, context_used, method,
2921 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2923 rgctx = emit_get_rgctx (cfg, method, context_used);
2925 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: direct call to Nullable<T>.Unbox. */
2927 return mono_emit_method_call (cfg, method, &val, NULL);
2932 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2936 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2937 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2938 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2939 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2941 obj_reg = sp [0]->dreg;
2942 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2943 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2945 /* FIXME: generics */
2946 g_assert (klass->rank == 0);
2949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2950 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2956 MonoInst *element_class;
2958 /* This assertion is from the unboxcast insn */
2959 g_assert (klass->rank == 0);
2961 element_class = emit_get_rgctx_klass (cfg, context_used,
2962 klass->element_class, MONO_RGCTX_INFO_KLASS);
2964 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2965 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2967 save_cast_details (cfg, klass->element_class, obj_reg);
2968 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2969 reset_cast_details (cfg);
2972 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2973 MONO_ADD_INS (cfg->cbb, add);
2974 add->type = STACK_MP;
2981 * Returns NULL and set the cfg exception on error.
2984 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2986 MonoInst *iargs [2];
/* Shared-domain code: allocate via mono_object_new (domain, klass). */
2989 if (cfg->opt & MONO_OPT_SHARED) {
2990 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2991 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2993 alloc_ftn = mono_object_new;
2994 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2995 /* This happens often in argument checking code, eg. throw new FooException... */
2996 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2997 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2998 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3000 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3001 MonoMethod *managed_alloc = NULL;
/* A NULL vtable means the class failed to load; report via cfg. */
3005 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3006 cfg->exception_ptr = klass;
3010 #ifndef MONO_CROSS_COMPILE
3011 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's managed allocator method when one is available. */
3014 if (managed_alloc) {
3015 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3016 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3018 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator also wants the instance size rounded up to
 * whole pointers ("lw" = length in words). */
3020 guint32 lw = vtable->klass->instance_size;
3021 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3022 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3023 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3026 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3030 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * only known at runtime and is supplied by DATA_INST.  Falls back to
 * mono_object_new () under MONO_OPT_SHARED, otherwise calls
 * mono_object_new_specific () with the runtime vtable.
 */
3034 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3037 MonoInst *iargs [2];
3038 MonoMethod *managed_alloc = NULL;
3042 FIXME: we cannot get managed_alloc here because we can't get
3043 the class's vtable (because it's not a closed class)
3045 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3046 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3049 if (cfg->opt & MONO_OPT_SHARED) {
3050 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3051 iargs [1] = data_inst;
3052 alloc_ftn = mono_object_new;
/* NOTE(review): managed_alloc is initialized to NULL above and the code
 * that would set it is commented out, so this branch appears dead here;
 * confirm against the full file. */
3054 if (managed_alloc) {
3055 iargs [0] = data_inst;
3056 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3059 iargs [0] = data_inst;
3060 alloc_ftn = mono_object_new_specific;
3063 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3067 * Returns NULL and set the cfg exception on error.
3070 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3072 MonoInst *alloc, *ins;
/* Nullable<T> has its own managed Box helper. */
3074 if (mono_class_is_nullable (klass)) {
3075 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3076 return mono_emit_method_call (cfg, method, &val, NULL);
/* Ordinary valuetype: allocate the box, then store the value right after
 * the MonoObject header. */
3079 alloc = handle_alloc (cfg, klass, TRUE);
3083 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Box VAL into KLASS in shared generic code, where the runtime vtable is
 * supplied by DATA_INST.  Nullable<T> goes through an indirect call to its
 * Box helper resolved via the rgctx; other valuetypes allocate through
 * handle_alloc_from_inst () and store the value after the object header.
 */
3089 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3091 MonoInst *alloc, *ins;
3093 if (mono_class_is_nullable (klass)) {
3094 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3095 /* FIXME: What if the class is shared? We might not
3096 have to get the method address from the RGCTX. */
3097 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3098 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3099 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3101 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3103 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3105 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * is_complex_isinst:
 *
 *   TRUE when an isinst/castclass against KLASS cannot be handled by the
 * simple inline check and must go through an icall (interfaces, arrays,
 * Nullable<T>, MBR/sealed classes, variant generics, type variables).
 * The leading "TRUE ||" currently forces the complex path for every class;
 * see the FIXME below.
 */
3111 // FIXME: This doesn't work yet (class libs tests fail?)
3112 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3115 * Returns NULL and set the cfg exception on error.
3118 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3120 MonoBasicBlock *is_null_bb;
3121 int obj_reg = src->dreg;
3122 int vtable_reg = alloc_preg (cfg);
3123 MonoInst *klass_inst = NULL;
/* Shared code: the target class is fetched through the rgctx. */
3128 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3129 klass, MONO_RGCTX_INFO_KLASS);
3131 if (is_complex_isinst (klass)) {
3132 /* Complex case, handle by an icall */
3138 args [1] = klass_inst;
3140 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3142 /* Simple case, handled by the code below */
/* A null reference always passes castclass. */
3146 NEW_BBLOCK (cfg, is_null_bb);
3148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3151 save_cast_details (cfg, klass, obj_reg);
3153 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3155 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3157 int klass_reg = alloc_preg (cfg);
3159 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single class (or vtable) comparison suffices;
 * the vtable-compare variant is disabled pending the remoting FIXME. */
3161 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3162 /* the remoting code is broken, access the class for now */
3163 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3164 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3166 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3167 cfg->exception_ptr = klass;
3170 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3173 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3175 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full hierarchy walk via mini_emit_castclass_inst (). */
3177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3178 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3182 MONO_START_BB (cfg, is_null_bb);
3184 reset_cast_details (cfg);
3190 * Returns NULL and set the cfg exception on error.
3193 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3196 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3197 int obj_reg = src->dreg;
3198 int vtable_reg = alloc_preg (cfg);
3199 int res_reg = alloc_preg (cfg);
3200 MonoInst *klass_inst = NULL;
3203 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3205 if (is_complex_isinst (klass)) {
3208 /* Complex case, handle by an icall */
3214 args [1] = klass_inst;
3216 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3218 /* Simple case, the code below can handle it */
/* Result protocol of the inline path: res_reg holds the object on
 * success (is_null_bb) and NULL on failure (false_bb). */
3222 NEW_BBLOCK (cfg, is_null_bb);
3223 NEW_BBLOCK (cfg, false_bb);
3224 NEW_BBLOCK (cfg, end_bb);
3226 /* Do the assignment at the beginning, so the other assignment can be if converted */
3227 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3228 ins->type = STACK_OBJ;
/* null input -> result is null (success by CLI isinst semantics). */
3231 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3232 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3236 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3237 g_assert (!context_used);
3238 /* the is_null_bb target simply copies the input register to the output */
3239 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3241 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then the element classes are compared,
 * with special handling for Object/Enum/ValueType element types. */
3244 int rank_reg = alloc_preg (cfg);
3245 int eclass_reg = alloc_preg (cfg);
3247 g_assert (!context_used);
3248 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3250 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3253 if (klass->cast_class == mono_defaults.object_class) {
3254 int parent_reg = alloc_preg (cfg);
3255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3256 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3257 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3259 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3260 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3261 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3262 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3263 } else if (klass->cast_class == mono_defaults.enum_class) {
3264 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3266 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3267 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3269 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3270 /* Check that the object is a vector too */
3271 int bounds_reg = alloc_preg (cfg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3273 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3274 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3277 /* the is_null_bb target simply copies the input register to the output */
3278 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3280 } else if (mono_class_is_nullable (klass)) {
3281 g_assert (!context_used);
3282 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3283 /* the is_null_bb target simply copies the input register to the output */
3284 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: one class (or vtable) comparison decides the result;
 * the vtable-compare variant is disabled pending the remoting FIXME. */
3286 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3287 g_assert (!context_used);
3288 /* the remoting code is broken, access the class for now */
3289 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3290 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3292 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3293 cfg->exception_ptr = klass;
3296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3298 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3301 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3302 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General class: hierarchy walk (rgctx-aware variant). */
3304 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3305 /* the is_null_bb target simply copies the input register to the output */
3306 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path writes NULL into the result register. */
3311 MONO_START_BB (cfg, false_bb);
3313 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3314 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3316 MONO_START_BB (cfg, is_null_bb);
3318 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Remoting-aware isinst (see the 0/1/2 result protocol documented in the
 * comment below).  Transparent-proxy objects whose type cannot be decided
 * locally yield 2 so the caller can fall back to a remote check.
 */
3324 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3326 /* This opcode takes as input an object reference and a class, and returns:
3327 0) if the object is an instance of the class,
3328 1) if the object is not instance of the class,
3329 2) if the object is a proxy whose type cannot be determined */
3332 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3333 int obj_reg = src->dreg;
3334 int dreg = alloc_ireg (cfg);
3336 int klass_reg = alloc_preg (cfg);
3338 NEW_BBLOCK (cfg, true_bb);
3339 NEW_BBLOCK (cfg, false_bb);
3340 NEW_BBLOCK (cfg, false2_bb);
3341 NEW_BBLOCK (cfg, end_bb);
3342 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
3344 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3345 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3347 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3348 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface target: try the interface bitmap first; on failure, only a
 * transparent proxy with custom type info may still yield 2. */
3350 tmp_reg = alloc_preg (cfg);
3351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3352 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3353 MONO_START_BB (cfg, interface_fail_bb);
3354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3356 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3358 tmp_reg = alloc_preg (cfg);
3359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface target: distinguish proxies from ordinary objects. */
3363 tmp_reg = alloc_preg (cfg);
3364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3367 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3368 tmp_reg = alloc_preg (cfg);
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3372 tmp_reg = alloc_preg (cfg);
3373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3377 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3378 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3380 MONO_START_BB (cfg, no_proxy_bb);
3382 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results (1, 2, 0) and converge. */
3385 MONO_START_BB (cfg, false_bb);
3387 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3388 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3390 MONO_START_BB (cfg, false2_bb);
3392 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3393 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3395 MONO_START_BB (cfg, true_bb);
3397 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3399 MONO_START_BB (cfg, end_bb);
/* The returned instruction carries the I4 result in dreg. */
3402 MONO_INST_NEW (cfg, ins, OP_ICONST);
3404 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Remoting-aware castclass (see the 0/1 result protocol documented in the
 * comment below).  Definite mismatches throw InvalidCastException; proxies
 * whose type cannot be decided locally yield 1 for a later remote check.
 */
3410 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3412 /* This opcode takes as input an object reference and a class, and returns:
3413 0) if the object is an instance of the class,
3414 1) if the object is a proxy whose type cannot be determined
3415 an InvalidCastException exception is thrown otherwhise*/
3418 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3419 int obj_reg = src->dreg;
3420 int dreg = alloc_ireg (cfg);
3421 int tmp_reg = alloc_preg (cfg);
3422 int klass_reg = alloc_preg (cfg);
3424 NEW_BBLOCK (cfg, end_bb);
3425 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always passes castclass (result 0). */
3427 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3428 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3430 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3431 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface target: on bitmap failure, only a transparent proxy with
 * custom type info avoids the InvalidCastException. */
3433 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3434 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3435 MONO_START_BB (cfg, interface_fail_bb);
3436 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3438 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3440 tmp_reg = alloc_preg (cfg);
3441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3443 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3445 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target: distinguish proxies from ordinary objects. */
3449 NEW_BBLOCK (cfg, no_proxy_bb);
3451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3453 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3455 tmp_reg = alloc_preg (cfg);
3456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3459 tmp_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3464 NEW_BBLOCK (cfg, fail_1_bb);
3466 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3468 MONO_START_BB (cfg, fail_1_bb);
3470 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: a failing castclass here throws. */
3473 MONO_START_BB (cfg, no_proxy_bb);
3475 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3478 MONO_START_BB (cfg, ok_result_bb);
3480 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3482 MONO_START_BB (cfg, end_bb);
/* The returned instruction carries the I4 result in dreg. */
3485 MONO_INST_NEW (cfg, ins, OP_ICONST);
3487 ins->type = STACK_I4;
3493 * Returns NULL and sets the cfg exception on error.
3495 static G_GNUC_UNUSED MonoInst*
3496 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3498 gpointer *trampoline;
3499 MonoInst *obj, *method_ins, *tramp_ins;
3503 obj = handle_alloc (cfg, klass, FALSE);
3507 /* Inline the contents of mono_delegate_ctor */
3509 /* Set target field */
3510 /* Optimize away setting of NULL target */
3511 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3512 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3514 /* Set method field */
3515 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3516 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3519 * To avoid looking up the compiled code belonging to the target method
3520 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3521 * store it, and we fill it after the method has been compiled.
3523 if (!cfg->compile_aot && !method->dynamic) {
3524 MonoInst *code_slot_ins;
3527 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3529 domain = mono_domain_get ();
3530 mono_domain_lock (domain);
3531 if (!domain_jit_info (domain)->method_code_hash)
3532 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3533 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3535 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3536 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3538 mono_domain_unlock (domain);
3540 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3542 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3545 /* Set invoke_impl field */
3546 if (cfg->compile_aot) {
3547 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3549 trampoline = mono_create_delegate_trampoline (klass);
3550 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3552 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3554 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3560 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3562 MonoJitICallInfo *info;
3564 /* Need to register the icall so it gets an icall wrapper */
3565 info = mono_get_array_new_va_icall (rank);
3567 cfg->flags |= MONO_CFG_HAS_VARARGS;
3569 /* mono_array_new_va () needs a vararg calling convention */
3570 cfg->disable_llvm = TRUE;
3572 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3573 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3577 mono_emit_load_got_addr (MonoCompile *cfg)
3579 MonoInst *getaddr, *dummy_use;
3581 if (!cfg->got_var || cfg->got_var_allocated)
3584 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3585 getaddr->dreg = cfg->got_var->dreg;
3587 /* Add it to the start of the first bblock */
3588 if (cfg->bb_entry->code) {
3589 getaddr->next = cfg->bb_entry->code;
3590 cfg->bb_entry->code = getaddr;
3593 MONO_ADD_INS (cfg->bb_entry, getaddr);
3595 cfg->got_var_allocated = TRUE;
3598 * Add a dummy use to keep the got_var alive, since real uses might
3599 * only be generated by the back ends.
3600 * Add it to end_bblock, so the variable's lifetime covers the whole
3602 * It would be better to make the usage of the got var explicit in all
3603 * cases when the backend needs it (i.e. calls, throw etc.), so this
3604 * wouldn't be needed.
3606 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3607 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3610 static int inline_limit;
3611 static gboolean inline_limit_inited;
3614 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3616 MonoMethodHeaderSummary header;
3618 #ifdef MONO_ARCH_SOFT_FLOAT
3619 MonoMethodSignature *sig = mono_method_signature (method);
3623 if (cfg->generic_sharing_context)
3626 if (cfg->inline_depth > 10)
3629 #ifdef MONO_ARCH_HAVE_LMF_OPS
3630 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3631 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3632 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3637 if (!mono_method_get_header_summary (method, &header))
3640 /*runtime, icall and pinvoke are checked by summary call*/
3641 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3642 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3643 (method->klass->marshalbyref) ||
3647 /* also consider num_locals? */
3648 /* Do the size check early to avoid creating vtables */
3649 if (!inline_limit_inited) {
3650 if (getenv ("MONO_INLINELIMIT"))
3651 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3653 inline_limit = INLINE_LENGTH_LIMIT;
3654 inline_limit_inited = TRUE;
3656 if (header.code_size >= inline_limit)
3660 * if we can initialize the class of the method right away, we do,
3661 * otherwise we don't allow inlining if the class needs initialization,
3662 * since it would mean inserting a call to mono_runtime_class_init()
3663 * inside the inlined code
3665 if (!(cfg->opt & MONO_OPT_SHARED)) {
3666 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3667 if (cfg->run_cctors && method->klass->has_cctor) {
3668 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3669 if (!method->klass->runtime_info)
3670 /* No vtable created yet */
3672 vtable = mono_class_vtable (cfg->domain, method->klass);
3675 /* This makes so that inline cannot trigger */
3676 /* .cctors: too many apps depend on them */
3677 /* running with a specific order... */
3678 if (! vtable->initialized)
3680 mono_runtime_class_init (vtable);
3682 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3683 if (!method->klass->runtime_info)
3684 /* No vtable created yet */
3686 vtable = mono_class_vtable (cfg->domain, method->klass);
3689 if (!vtable->initialized)
3694 * If we're compiling for shared code
3695 * the cctor will need to be run at aot method load time, for example,
3696 * or at the end of the compilation of the inlining method.
3698 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3703 * CAS - do not inline methods with declarative security
3704 * Note: this has to be before any possible return TRUE;
3706 if (mono_method_has_declsec (method))
3709 #ifdef MONO_ARCH_SOFT_FLOAT
3711 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3713 for (i = 0; i < sig->param_count; ++i)
3714 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3722 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3724 if (vtable->initialized && !cfg->compile_aot)
3727 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3730 if (!mono_class_needs_cctor_run (vtable->klass, method))
3733 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3734 /* The initialization is already done before the method is called */
3741 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3745 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3747 mono_class_init (klass);
3748 size = mono_class_array_element_size (klass);
3750 mult_reg = alloc_preg (cfg);
3751 array_reg = arr->dreg;
3752 index_reg = index->dreg;
3754 #if SIZEOF_REGISTER == 8
3755 /* The array reg is 64 bits but the index reg is only 32 */
3756 if (COMPILE_LLVM (cfg)) {
3758 index2_reg = index_reg;
3760 index2_reg = alloc_preg (cfg);
3761 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3764 if (index->type == STACK_I8) {
3765 index2_reg = alloc_preg (cfg);
3766 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3768 index2_reg = index_reg;
3772 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3774 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3775 if (size == 1 || size == 2 || size == 4 || size == 8) {
3776 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3778 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3779 ins->type = STACK_PTR;
3785 add_reg = alloc_preg (cfg);
3787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3788 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3789 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3790 ins->type = STACK_PTR;
3791 MONO_ADD_INS (cfg->cbb, ins);
3796 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3798 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3800 int bounds_reg = alloc_preg (cfg);
3801 int add_reg = alloc_preg (cfg);
3802 int mult_reg = alloc_preg (cfg);
3803 int mult2_reg = alloc_preg (cfg);
3804 int low1_reg = alloc_preg (cfg);
3805 int low2_reg = alloc_preg (cfg);
3806 int high1_reg = alloc_preg (cfg);
3807 int high2_reg = alloc_preg (cfg);
3808 int realidx1_reg = alloc_preg (cfg);
3809 int realidx2_reg = alloc_preg (cfg);
3810 int sum_reg = alloc_preg (cfg);
3815 mono_class_init (klass);
3816 size = mono_class_array_element_size (klass);
3818 index1 = index_ins1->dreg;
3819 index2 = index_ins2->dreg;
3821 /* range checking */
3822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3823 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3825 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3826 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3827 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3829 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3830 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3831 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3833 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3834 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3835 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3837 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3838 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3839 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3841 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3842 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3844 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3845 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3847 ins->type = STACK_MP;
3849 MONO_ADD_INS (cfg->cbb, ins);
3856 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3860 MonoMethod *addr_method;
3863 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3866 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3868 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3869 /* emit_ldelema_2 depends on OP_LMUL */
3870 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3871 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3875 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3876 addr_method = mono_marshal_get_array_address (rank, element_size);
3877 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3882 static MonoBreakPolicy
3883 always_insert_breakpoint (MonoMethod *method)
3885 return MONO_BREAK_POLICY_ALWAYS;
3888 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3891 * mono_set_break_policy:
3892 * policy_callback: the new callback function
3894 * Allow embedders to decide whether to actually obey breakpoint instructions
3895 * (both break IL instructions and Debugger.Break () method calls), for example
3896 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3897 * untrusted or semi-trusted code.
3899 * @policy_callback will be called every time a break point instruction needs to
3900 * be inserted with the method argument being the method that calls Debugger.Break()
3901 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3902 * if it wants the breakpoint to not be effective in the given method.
3903 * #MONO_BREAK_POLICY_ALWAYS is the default.
3906 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3908 if (policy_callback)
3909 break_policy_func = policy_callback;
3911 break_policy_func = always_insert_breakpoint;
3915 should_insert_brekpoint (MonoMethod *method) {
3916 switch (break_policy_func (method)) {
3917 case MONO_BREAK_POLICY_ALWAYS:
3919 case MONO_BREAK_POLICY_NEVER:
3921 case MONO_BREAK_POLICY_ON_DBG:
3922 return mono_debug_using_mono_debugger ();
3924 g_warning ("Incorrect value returned from break policy callback");
3930 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3932 MonoInst *ins = NULL;
3934 static MonoClass *runtime_helpers_class = NULL;
3935 if (! runtime_helpers_class)
3936 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3937 "System.Runtime.CompilerServices", "RuntimeHelpers");
3939 if (cmethod->klass == mono_defaults.string_class) {
3940 if (strcmp (cmethod->name, "get_Chars") == 0) {
3941 int dreg = alloc_ireg (cfg);
3942 int index_reg = alloc_preg (cfg);
3943 int mult_reg = alloc_preg (cfg);
3944 int add_reg = alloc_preg (cfg);
3946 #if SIZEOF_REGISTER == 8
3947 /* The array reg is 64 bits but the index reg is only 32 */
3948 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3950 index_reg = args [1]->dreg;
3952 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3954 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3955 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3956 add_reg = ins->dreg;
3957 /* Avoid a warning */
3959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3963 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3964 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3965 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3967 type_from_op (ins, NULL, NULL);
3969 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3970 int dreg = alloc_ireg (cfg);
3971 /* Decompose later to allow more optimizations */
3972 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3973 ins->type = STACK_I4;
3974 cfg->cbb->has_array_access = TRUE;
3975 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3978 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3979 int mult_reg = alloc_preg (cfg);
3980 int add_reg = alloc_preg (cfg);
3982 /* The corlib functions check for oob already. */
3983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3984 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3986 return cfg->cbb->last_ins;
3989 } else if (cmethod->klass == mono_defaults.object_class) {
3991 if (strcmp (cmethod->name, "GetType") == 0) {
3992 int dreg = alloc_preg (cfg);
3993 int vt_reg = alloc_preg (cfg);
3994 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3995 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3996 type_from_op (ins, NULL, NULL);
3999 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
4000 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4001 int dreg = alloc_ireg (cfg);
4002 int t1 = alloc_ireg (cfg);
4004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4005 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4006 ins->type = STACK_I4;
4010 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4011 MONO_INST_NEW (cfg, ins, OP_NOP);
4012 MONO_ADD_INS (cfg->cbb, ins);
4016 } else if (cmethod->klass == mono_defaults.array_class) {
4017 if (cmethod->name [0] != 'g')
4020 if (strcmp (cmethod->name, "get_Rank") == 0) {
4021 int dreg = alloc_ireg (cfg);
4022 int vtable_reg = alloc_preg (cfg);
4023 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4024 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4025 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4026 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4027 type_from_op (ins, NULL, NULL);
4030 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4031 int dreg = alloc_ireg (cfg);
4033 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4034 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4035 type_from_op (ins, NULL, NULL);
4040 } else if (cmethod->klass == runtime_helpers_class) {
4042 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4043 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4047 } else if (cmethod->klass == mono_defaults.thread_class) {
4048 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4049 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4050 MONO_ADD_INS (cfg->cbb, ins);
4052 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4053 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4054 MONO_ADD_INS (cfg->cbb, ins);
4057 } else if (cmethod->klass == mono_defaults.monitor_class) {
4058 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4059 if (strcmp (cmethod->name, "Enter") == 0) {
4062 if (COMPILE_LLVM (cfg)) {
4064 * Pass the argument normally, the LLVM backend will handle the
4065 * calling convention problems.
4067 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4069 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4070 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4071 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4072 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4075 return (MonoInst*)call;
4076 } else if (strcmp (cmethod->name, "Exit") == 0) {
4079 if (COMPILE_LLVM (cfg)) {
4080 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4082 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4083 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4084 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4085 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4088 return (MonoInst*)call;
4090 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4091 MonoMethod *fast_method = NULL;
4093 /* Avoid infinite recursion */
4094 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4095 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4096 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4099 if (strcmp (cmethod->name, "Enter") == 0 ||
4100 strcmp (cmethod->name, "Exit") == 0)
4101 fast_method = mono_monitor_get_fast_path (cmethod);
4105 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4107 } else if (mini_class_is_system_array (cmethod->klass) &&
4108 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4109 MonoInst *addr, *store, *load;
4110 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4112 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4113 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4114 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4116 } else if (cmethod->klass->image == mono_defaults.corlib &&
4117 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4118 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4121 #if SIZEOF_REGISTER == 8
4122 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4123 /* 64 bit reads are already atomic */
4124 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4125 ins->dreg = mono_alloc_preg (cfg);
4126 ins->inst_basereg = args [0]->dreg;
4127 ins->inst_offset = 0;
4128 MONO_ADD_INS (cfg->cbb, ins);
4132 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4133 if (strcmp (cmethod->name, "Increment") == 0) {
4134 MonoInst *ins_iconst;
4137 if (fsig->params [0]->type == MONO_TYPE_I4)
4138 opcode = OP_ATOMIC_ADD_NEW_I4;
4139 #if SIZEOF_REGISTER == 8
4140 else if (fsig->params [0]->type == MONO_TYPE_I8)
4141 opcode = OP_ATOMIC_ADD_NEW_I8;
4144 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4145 ins_iconst->inst_c0 = 1;
4146 ins_iconst->dreg = mono_alloc_ireg (cfg);
4147 MONO_ADD_INS (cfg->cbb, ins_iconst);
4149 MONO_INST_NEW (cfg, ins, opcode);
4150 ins->dreg = mono_alloc_ireg (cfg);
4151 ins->inst_basereg = args [0]->dreg;
4152 ins->inst_offset = 0;
4153 ins->sreg2 = ins_iconst->dreg;
4154 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4155 MONO_ADD_INS (cfg->cbb, ins);
4157 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4158 MonoInst *ins_iconst;
4161 if (fsig->params [0]->type == MONO_TYPE_I4)
4162 opcode = OP_ATOMIC_ADD_NEW_I4;
4163 #if SIZEOF_REGISTER == 8
4164 else if (fsig->params [0]->type == MONO_TYPE_I8)
4165 opcode = OP_ATOMIC_ADD_NEW_I8;
4168 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4169 ins_iconst->inst_c0 = -1;
4170 ins_iconst->dreg = mono_alloc_ireg (cfg);
4171 MONO_ADD_INS (cfg->cbb, ins_iconst);
4173 MONO_INST_NEW (cfg, ins, opcode);
4174 ins->dreg = mono_alloc_ireg (cfg);
4175 ins->inst_basereg = args [0]->dreg;
4176 ins->inst_offset = 0;
4177 ins->sreg2 = ins_iconst->dreg;
4178 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4179 MONO_ADD_INS (cfg->cbb, ins);
4181 } else if (strcmp (cmethod->name, "Add") == 0) {
4184 if (fsig->params [0]->type == MONO_TYPE_I4)
4185 opcode = OP_ATOMIC_ADD_NEW_I4;
4186 #if SIZEOF_REGISTER == 8
4187 else if (fsig->params [0]->type == MONO_TYPE_I8)
4188 opcode = OP_ATOMIC_ADD_NEW_I8;
4192 MONO_INST_NEW (cfg, ins, opcode);
4193 ins->dreg = mono_alloc_ireg (cfg);
4194 ins->inst_basereg = args [0]->dreg;
4195 ins->inst_offset = 0;
4196 ins->sreg2 = args [1]->dreg;
4197 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4198 MONO_ADD_INS (cfg->cbb, ins);
4201 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4203 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4204 if (strcmp (cmethod->name, "Exchange") == 0) {
4206 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4208 if (fsig->params [0]->type == MONO_TYPE_I4)
4209 opcode = OP_ATOMIC_EXCHANGE_I4;
4210 #if SIZEOF_REGISTER == 8
4211 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4212 (fsig->params [0]->type == MONO_TYPE_I))
4213 opcode = OP_ATOMIC_EXCHANGE_I8;
4215 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4216 opcode = OP_ATOMIC_EXCHANGE_I4;
4221 MONO_INST_NEW (cfg, ins, opcode);
4222 ins->dreg = mono_alloc_ireg (cfg);
4223 ins->inst_basereg = args [0]->dreg;
4224 ins->inst_offset = 0;
4225 ins->sreg2 = args [1]->dreg;
4226 MONO_ADD_INS (cfg->cbb, ins);
4228 switch (fsig->params [0]->type) {
4230 ins->type = STACK_I4;
4234 ins->type = STACK_I8;
4236 case MONO_TYPE_OBJECT:
4237 ins->type = STACK_OBJ;
4240 g_assert_not_reached ();
4243 #if HAVE_WRITE_BARRIERS
4245 MonoInst *dummy_use;
4246 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4247 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4248 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4252 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4254 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4255 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4257 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4258 if (fsig->params [1]->type == MONO_TYPE_I4)
4260 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4261 size = sizeof (gpointer);
4262 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4265 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4266 ins->dreg = alloc_ireg (cfg);
4267 ins->sreg1 = args [0]->dreg;
4268 ins->sreg2 = args [1]->dreg;
4269 ins->sreg3 = args [2]->dreg;
4270 ins->type = STACK_I4;
4271 MONO_ADD_INS (cfg->cbb, ins);
4272 } else if (size == 8) {
4273 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4274 ins->dreg = alloc_ireg (cfg);
4275 ins->sreg1 = args [0]->dreg;
4276 ins->sreg2 = args [1]->dreg;
4277 ins->sreg3 = args [2]->dreg;
4278 ins->type = STACK_I8;
4279 MONO_ADD_INS (cfg->cbb, ins);
4281 /* g_assert_not_reached (); */
4283 #if HAVE_WRITE_BARRIERS
4285 MonoInst *dummy_use;
4286 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4287 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4288 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4292 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4296 } else if (cmethod->klass->image == mono_defaults.corlib) {
4297 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4298 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4299 if (should_insert_brekpoint (cfg->method))
4300 MONO_INST_NEW (cfg, ins, OP_BREAK);
4302 MONO_INST_NEW (cfg, ins, OP_NOP);
4303 MONO_ADD_INS (cfg->cbb, ins);
4306 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4307 && strcmp (cmethod->klass->name, "Environment") == 0) {
4309 EMIT_NEW_ICONST (cfg, ins, 1);
4311 EMIT_NEW_ICONST (cfg, ins, 0);
4315 } else if (cmethod->klass == mono_defaults.math_class) {
4317 * There is general branches code for Min/Max, but it does not work for
4319 * http://everything2.com/?node_id=1051618
4323 #ifdef MONO_ARCH_SIMD_INTRINSICS
4324 if (cfg->opt & MONO_OPT_SIMD) {
4325 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4331 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4335 * This entry point could be used later for arbitrary method
4338 inline static MonoInst*
4339 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4340 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4342 if (method->klass == mono_defaults.string_class) {
4343 /* managed string allocation support */
4344 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4345 MonoInst *iargs [2];
4346 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4347 MonoMethod *managed_alloc = NULL;
4349 g_assert (vtable); /*Should not fail since it System.String*/
4350 #ifndef MONO_CROSS_COMPILE
4351 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4355 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4356 iargs [1] = args [0];
4357 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4364 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4366 MonoInst *store, *temp;
4369 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4370 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4373 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4374 * would be different than the MonoInst's used to represent arguments, and
4375 * the ldelema implementation can't deal with that.
4376 * Solution: When ldelema is used on an inline argument, create a var for
4377 * it, emit ldelema on that var, and emit the saving code below in
4378 * inline_method () if needed.
4380 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4381 cfg->args [i] = temp;
4382 /* This uses cfg->args [i] which is set by the preceeding line */
4383 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4384 store->cil_code = sp [0]->cil_code;
4389 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4390 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4392 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4394 check_inline_called_method_name_limit (MonoMethod *called_method)
4397 static char *limit = NULL;
4399 if (limit == NULL) {
4400 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4402 if (limit_string != NULL)
4403 limit = limit_string;
4405 limit = (char *) "";
4408 if (limit [0] != '\0') {
4409 char *called_method_name = mono_method_full_name (called_method, TRUE);
4411 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4412 g_free (called_method_name);
4414 //return (strncmp_result <= 0);
4415 return (strncmp_result == 0);
4422 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4424 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4427 static char *limit = NULL;
4429 if (limit == NULL) {
4430 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4431 if (limit_string != NULL) {
4432 limit = limit_string;
4434 limit = (char *) "";
4438 if (limit [0] != '\0') {
4439 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4441 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4442 g_free (caller_method_name);
4444 //return (strncmp_result <= 0);
4445 return (strncmp_result == 0);
4453 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4454 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4456 MonoInst *ins, *rvar = NULL;
4457 MonoMethodHeader *cheader;
4458 MonoBasicBlock *ebblock, *sbblock;
4460 MonoMethod *prev_inlined_method;
4461 MonoInst **prev_locals, **prev_args;
4462 MonoType **prev_arg_types;
4463 guint prev_real_offset;
4464 GHashTable *prev_cbb_hash;
4465 MonoBasicBlock **prev_cil_offset_to_bb;
4466 MonoBasicBlock *prev_cbb;
4467 unsigned char* prev_cil_start;
4468 guint32 prev_cil_offset_to_bb_len;
4469 MonoMethod *prev_current_method;
4470 MonoGenericContext *prev_generic_context;
4471 gboolean ret_var_set, prev_ret_var_set;
4473 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4475 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4476 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4479 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4480 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4484 if (cfg->verbose_level > 2)
4485 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4487 if (!cmethod->inline_info) {
4488 mono_jit_stats.inlineable_methods++;
4489 cmethod->inline_info = 1;
4492 /* allocate local variables */
4493 cheader = mono_method_get_header (cmethod);
4495 if (cheader == NULL || mono_loader_get_last_error ()) {
4496 mono_loader_clear_error ();
4500 /* allocate space to store the return value */
4501 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4502 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4506 prev_locals = cfg->locals;
4507 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4508 for (i = 0; i < cheader->num_locals; ++i)
4509 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4511 /* allocate start and end blocks */
4512 /* This is needed so if the inline is aborted, we can clean up */
4513 NEW_BBLOCK (cfg, sbblock);
4514 sbblock->real_offset = real_offset;
4516 NEW_BBLOCK (cfg, ebblock);
4517 ebblock->block_num = cfg->num_bblocks++;
4518 ebblock->real_offset = real_offset;
4520 prev_args = cfg->args;
4521 prev_arg_types = cfg->arg_types;
4522 prev_inlined_method = cfg->inlined_method;
4523 cfg->inlined_method = cmethod;
4524 cfg->ret_var_set = FALSE;
4525 cfg->inline_depth ++;
4526 prev_real_offset = cfg->real_offset;
4527 prev_cbb_hash = cfg->cbb_hash;
4528 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4529 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4530 prev_cil_start = cfg->cil_start;
4531 prev_cbb = cfg->cbb;
4532 prev_current_method = cfg->current_method;
4533 prev_generic_context = cfg->generic_context;
4534 prev_ret_var_set = cfg->ret_var_set;
4536 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4538 ret_var_set = cfg->ret_var_set;
4540 cfg->inlined_method = prev_inlined_method;
4541 cfg->real_offset = prev_real_offset;
4542 cfg->cbb_hash = prev_cbb_hash;
4543 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4544 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4545 cfg->cil_start = prev_cil_start;
4546 cfg->locals = prev_locals;
4547 cfg->args = prev_args;
4548 cfg->arg_types = prev_arg_types;
4549 cfg->current_method = prev_current_method;
4550 cfg->generic_context = prev_generic_context;
4551 cfg->ret_var_set = prev_ret_var_set;
4552 cfg->inline_depth --;
4554 if ((costs >= 0 && costs < 60) || inline_allways) {
4555 if (cfg->verbose_level > 2)
4556 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4558 mono_jit_stats.inlined_methods++;
4560 /* always add some code to avoid block split failures */
4561 MONO_INST_NEW (cfg, ins, OP_NOP);
4562 MONO_ADD_INS (prev_cbb, ins);
4564 prev_cbb->next_bb = sbblock;
4565 link_bblock (cfg, prev_cbb, sbblock);
4568 * Get rid of the begin and end bblocks if possible to aid local
4571 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4573 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4574 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4576 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4577 MonoBasicBlock *prev = ebblock->in_bb [0];
4578 mono_merge_basic_blocks (cfg, prev, ebblock);
4580 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4581 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4582 cfg->cbb = prev_cbb;
4590 * If the inlined method contains only a throw, then the ret var is not
4591 * set, so set it to a dummy value.
4594 static double r8_0 = 0.0;
4596 switch (rvar->type) {
4598 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4601 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4606 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4609 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4610 ins->type = STACK_R8;
4611 ins->inst_p0 = (void*)&r8_0;
4612 ins->dreg = rvar->dreg;
4613 MONO_ADD_INS (cfg->cbb, ins);
4616 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4619 g_assert_not_reached ();
4623 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4628 if (cfg->verbose_level > 2)
4629 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4630 cfg->exception_type = MONO_EXCEPTION_NONE;
4631 mono_loader_clear_error ();
4633 /* This gets rid of the newly added bblocks */
4634 cfg->cbb = prev_cbb;
4640 * Some of these comments may well be out-of-date.
4641 * Design decisions: we do a single pass over the IL code (and we do bblock
4642 * splitting/merging in the few cases when it's required: a back jump to an IL
4643 * address that was not already seen as bblock starting point).
4644 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4645 * Complex operations are decomposed in simpler ones right away. We need to let the
4646 * arch-specific code peek and poke inside this process somehow (except when the
4647 * optimizations can take advantage of the full semantic info of coarse opcodes).
4648 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4649 * MonoInst->opcode initially is the IL opcode or some simplification of that
4650 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4651 * opcode with value bigger than OP_LAST.
4652 * At this point the IR can be handed over to an interpreter, a dumb code generator
4653 * or to the optimizing code generator that will translate it to SSA form.
4655 * Profiling directed optimizations.
4656 * We may compile by default with few or no optimizations and instrument the code
4657 * or the user may indicate what methods to optimize the most either in a config file
4658 * or through repeated runs where the compiler applies offline the optimizations to
4659 * each method and then decides if it was worth it.
/*
 * IL-stream / verification helper macros used by mono_method_to_ir ().
 * The CHECK_* macros bail out through UNVERIFIED on a malformed method;
 * CHECK_TYPELOAD records the failing class in cfg->exception_ptr and
 * jumps to the load_error label.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + (size) > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* Wrapped in do/while (0) so the macro expands to exactly one statement and
 * is safe inside unbraced if/else bodies (the previous bare `if {...}` was a
 * dangling-else hazard). */
#define CHECK_TYPELOAD(klass) do { if (!(klass) || (klass)->exception_type) { cfg->exception_ptr = (klass); goto load_error; } } while (0)

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4675 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4677 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4679 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream in [start, end) and create basic blocks (via
 * GET_BBLOCK) at every branch target, at the instruction following each
 * branch, and at every switch target, so the main IR pass knows where
 * bblocks begin.  A bblock whose last reachable instruction is a 'throw'
 * is flagged out_of_line.
 *
 * NOTE(review): this excerpt lost several structural lines (the main
 * `while (ip < end)` loop header, `ip += ...` operand advances, braces and
 * returns); the code below is kept byte-identical to the excerpt.
 */
4683 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4685 unsigned char *ip = start;
4686 unsigned char *target;
4689 MonoBasicBlock *bblock;
4690 const MonoOpcode *opcode;
/* cli_addr is the IL offset of the current instruction; mono_opcode_value ()
 * decodes the (possibly two-byte) opcode and advances ip past it. */
4693 cli_addr = ip - start;
4694 i = mono_opcode_value ((const guint8 **)&ip, end);
4697 opcode = &mono_opcodes [i];
/* Dispatch on the operand *kind* so we know how far to advance and whether
 * the instruction introduces control-flow edges. */
4698 switch (opcode->argument) {
4699 case MonoInlineNone:
4702 case MonoInlineString:
4703 case MonoInlineType:
4704 case MonoInlineField:
4705 case MonoInlineMethod:
4708 case MonoShortInlineR:
4715 case MonoShortInlineVar:
4716 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the following
 * instruction (cli_addr + 2).  Both the target and the fall-through
 * instruction start new bblocks. */
4719 case MonoShortInlineBrTarget:
4720 target = start + cli_addr + 2 + (signed char)ip [1];
4721 GET_BBLOCK (cfg, bblock, target);
4724 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement relative to cli_addr + 5. */
4726 case MonoInlineBrTarget:
4727 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4728 GET_BBLOCK (cfg, bblock, target);
4731 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets follow the count; displacements are relative to
 * the end of the whole instruction (cli_addr + 5 + 4*n). */
4733 case MonoInlineSwitch: {
4734 guint32 n = read32 (ip + 1);
4737 cli_addr += 5 + 4 * n;
4738 target = start + cli_addr;
4739 GET_BBLOCK (cfg, bblock, target);
4741 for (j = 0; j < n; ++j) {
4742 target = start + cli_addr + (gint32)read32 (ip);
4743 GET_BBLOCK (cfg, bblock, target);
/* Unknown operand kind: the opcode table and this switch must agree. */
4753 g_assert_not_reached ();
/* Mark the bblock containing a 'throw' as out-of-line so the code layout
 * pass can move it off the hot path. */
4756 if (i == CEE_THROW) {
4757 unsigned char *bb_start = ip - 1;
4759 /* Find the start of the bblock containing the throw */
4761 while ((bb_start >= start) && !bblock) {
4762 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4766 bblock->out_of_line = 1;
4775 static inline MonoMethod *
4776 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4780 if (m->wrapper_type != MONO_WRAPPER_NONE)
4781 return mono_method_get_wrapper_data (m, token);
4783 method = mono_get_method_full (m->klass->image, token, klass, context);
4788 static inline MonoMethod *
4789 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4791 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4793 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4799 static inline MonoClass*
4800 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4804 if (method->wrapper_type != MONO_WRAPPER_NONE)
4805 klass = mono_method_get_wrapper_data (method, token);
4807 klass = mono_class_get_full (method->klass->image, token, context);
4809 mono_class_init (klass);
4814 * Returns TRUE if the JIT should abort inlining because "callee"
4815 * is influenced by security attributes.
4818 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4822 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4826 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4827 if (result == MONO_JIT_SECURITY_OK)
4830 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4831 /* Generate code to throw a SecurityException before the actual call/link */
4832 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4835 NEW_ICONST (cfg, args [0], 4);
4836 NEW_METHODCONST (cfg, args [1], caller);
4837 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4838 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4839 /* don't hide previous results */
4840 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4841 cfg->exception_data = result;
4849 throw_exception (void)
4851 static MonoMethod *method = NULL;
4854 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4855 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4862 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4864 MonoMethod *thrower = throw_exception ();
4867 EMIT_NEW_PCONST (cfg, args [0], ex);
4868 mono_emit_method_call (cfg, thrower, args, NULL);
4872 * Return the original method is a wrapper is specified. We can only access
4873 * the custom attributes from the original method.
4876 get_original_method (MonoMethod *method)
4878 if (method->wrapper_type == MONO_WRAPPER_NONE)
4881 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4882 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4885 /* in other cases we need to find the original method */
4886 return mono_marshal_method_from_wrapper (method);
4890 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4891 MonoBasicBlock *bblock, unsigned char *ip)
4893 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4894 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4897 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4898 caller = get_original_method (caller);
4902 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4903 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4904 emit_throw_exception (cfg, mono_get_exception_field_access ());
4908 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4909 MonoBasicBlock *bblock, unsigned char *ip)
4911 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4912 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4915 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4916 caller = get_original_method (caller);
4920 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4921 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4922 emit_throw_exception (cfg, mono_get_exception_method_access ());
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical array-initialization IL sequence
 * (dup; ldtoken <field>; call RuntimeHelpers::InitializeArray) that follows
 * a newarr, and on success return a pointer to the static initializer data
 * (or, for AOT, the RVA wrapped with GUINT_TO_POINTER), storing the byte
 * size in *out_size and the field token in *out_field_token.
 *
 * NOTE(review): this excerpt lost several lines (size computations per
 * element type, early-return failure paths, closing braces); the code below
 * is kept byte-identical to the excerpt.
 */
4926 * Check that the IL instructions at ip are the array initialization
4927 * sequence and return the pointer to the data and the size.
4930 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4933 * newarr[System.Int32]
4935 * ldtoken field valuetype ...
4936 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (a Field token). */
4938 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4939 guint32 token = read32 (ip + 7);
4940 guint32 field_token = read32 (ip + 2);
4941 guint32 field_index = field_token & 0xffffff;
4943 const char *data_ptr;
4945 MonoMethod *cmethod;
4946 MonoClass *dummy_class;
4947 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4953 *out_field_token = field_token;
4955 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the call target really is corlib's RuntimeHelpers.InitializeArray. */
4958 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types with an endian-neutral (1-byte) or little-endian
 * representation can be initialized by a raw memory copy. */
4960 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4961 case MONO_TYPE_BOOLEAN:
4965 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4966 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4967 case MONO_TYPE_CHAR:
4977 return NULL; /* stupid ARM FP swapped format */
/* Refuse if the computed array payload is larger than the RVA field. */
4987 if (size > mono_type_size (field->type, &dummy_align))
4990 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4991 if (!method->klass->image->dynamic) {
4992 field_index = read32 (ip + 2) & 0xffffff;
4993 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4994 data_ptr = mono_image_rva_map (method->klass->image, rva);
4995 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4996 /* for aot code we do the lookup on load */
4997 if (aot && data_ptr)
4998 return GUINT_TO_POINTER (rva);
5000 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the data directly. */
5002 data_ptr = mono_field_get_data (field);
5010 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5012 char *method_fname = mono_method_full_name (method, TRUE);
5015 if (mono_method_get_header (method)->code_size == 0)
5016 method_code = g_strdup ("method body is empty.");
5018 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5019 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5020 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5021 g_free (method_fname);
5022 g_free (method_code);
5026 set_exception_object (MonoCompile *cfg, MonoException *exception)
5028 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5029 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5030 cfg->exception_ptr = exception;
5034 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5038 if (cfg->generic_sharing_context)
5039 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5041 type = &klass->byval_arg;
5042 return MONO_TYPE_IS_REFERENCE (type);
5046 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5049 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5050 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5051 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5052 /* Optimize reg-reg moves away */
5054 * Can't optimize other opcodes, since sp[0] might point to
5055 * the last ins of a decomposed opcode.
5057 sp [0]->dreg = (cfg)->locals [n]->dreg;
5059 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations, so when the ldloca at IP is
 * immediately followed by an initobj in the same basic block, emit the
 * equivalent direct initialization of the local (NULL store for reference
 * types, VZERO for value types) instead, and return the IL address past the
 * consumed sequence; returns NULL when the pattern does not apply.
 *
 * NOTE(review): this excerpt lost the function's opening lines (the
 * size==1 short-form handling, local declarations) and its tail (the skip
 * handling, return paths and the load_error label targeted by
 * CHECK_TYPELOAD); the code below is kept byte-identical to the excerpt.
 */
5064 * ldloca inhibits many optimizations so try to get rid of it in common
5067 static inline unsigned char *
5068 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Long-form ldloca: 2-byte local index operand. */
5077 local = read16 (ip + 2);
/* Only fires when the next instruction is initobj and still inside the
 * current bblock (ip_in_bb), so no branch can land between the two. */
5081 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5082 gboolean skip = FALSE;
5084 /* From the INITOBJ case */
5085 token = read32 (ip + 2);
5086 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5087 CHECK_TYPELOAD (klass);
/* Shared generic code: a type variable may resolve to a reference type. */
5088 if (generic_class_is_reference_type (cfg, klass)) {
5089 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5090 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5091 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5092 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5093 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5106 is_exception_class (MonoClass *class)
5109 if (class == mono_defaults.exception_class)
5111 class = class->parent;
5117 * mono_method_to_ir:
5119 * Translate the .net IL into linear IR.
5122 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5123 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5124 guint inline_offset, gboolean is_virtual_call)
5127 MonoInst *ins, **sp, **stack_start;
5128 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5129 MonoSimpleBasicBlock *bb = NULL;
5130 MonoMethod *cmethod, *method_definition;
5131 MonoInst **arg_array;
5132 MonoMethodHeader *header;
5134 guint32 token, ins_flag;
5136 MonoClass *constrained_call = NULL;
5137 unsigned char *ip, *end, *target, *err_pos;
5138 static double r8_0 = 0.0;
5139 MonoMethodSignature *sig;
5140 MonoGenericContext *generic_context = NULL;
5141 MonoGenericContainer *generic_container = NULL;
5142 MonoType **param_types;
5143 int i, n, start_new_bblock, dreg;
5144 int num_calls = 0, inline_costs = 0;
5145 int breakpoint_id = 0;
5147 MonoBoolean security, pinvoke;
5148 MonoSecurityManager* secman = NULL;
5149 MonoDeclSecurityActions actions;
5150 GSList *class_inits = NULL;
5151 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5153 gboolean init_locals, seq_points, skip_dead_blocks;
5155 /* serialization and xdomain stuff may need access to private fields and methods */
5156 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5157 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5158 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5159 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5160 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5161 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5163 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5165 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5166 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5167 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5168 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5170 image = method->klass->image;
5171 header = mono_method_get_header (method);
5172 generic_container = mono_method_get_generic_container (method);
5173 sig = mono_method_signature (method);
5174 num_args = sig->hasthis + sig->param_count;
5175 ip = (unsigned char*)header->code;
5176 cfg->cil_start = ip;
5177 end = ip + header->code_size;
5178 mono_jit_stats.cil_code_size += header->code_size;
5179 init_locals = header->init_locals;
5181 seq_points = cfg->gen_seq_points && cfg->method == method;
5184 * Methods without init_locals set could cause asserts in various passes
5189 method_definition = method;
5190 while (method_definition->is_inflated) {
5191 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5192 method_definition = imethod->declaring;
5195 /* SkipVerification is not allowed if core-clr is enabled */
5196 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5198 dont_verify_stloc = TRUE;
5201 if (!dont_verify && mini_method_verify (cfg, method_definition))
5202 goto exception_exit;
5204 if (mono_debug_using_mono_debugger ())
5205 cfg->keep_cil_nops = TRUE;
5207 if (sig->is_inflated)
5208 generic_context = mono_method_get_context (method);
5209 else if (generic_container)
5210 generic_context = &generic_container->context;
5211 cfg->generic_context = generic_context;
5213 if (!cfg->generic_sharing_context)
5214 g_assert (!sig->has_type_parameters);
5216 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5217 g_assert (method->is_inflated);
5218 g_assert (mono_method_get_context (method)->method_inst);
5220 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5221 g_assert (sig->generic_param_count);
5223 if (cfg->method == method) {
5224 cfg->real_offset = 0;
5226 cfg->real_offset = inline_offset;
5229 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5230 cfg->cil_offset_to_bb_len = header->code_size;
5232 cfg->current_method = method;
5234 if (cfg->verbose_level > 2)
5235 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5237 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5239 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5240 for (n = 0; n < sig->param_count; ++n)
5241 param_types [n + sig->hasthis] = sig->params [n];
5242 cfg->arg_types = param_types;
5244 dont_inline = g_list_prepend (dont_inline, method);
5245 if (cfg->method == method) {
5247 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5248 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5251 NEW_BBLOCK (cfg, start_bblock);
5252 cfg->bb_entry = start_bblock;
5253 start_bblock->cil_code = NULL;
5254 start_bblock->cil_length = 0;
5257 NEW_BBLOCK (cfg, end_bblock);
5258 cfg->bb_exit = end_bblock;
5259 end_bblock->cil_code = NULL;
5260 end_bblock->cil_length = 0;
5261 g_assert (cfg->num_bblocks == 2);
5263 arg_array = cfg->args;
5265 if (header->num_clauses) {
5266 cfg->spvars = g_hash_table_new (NULL, NULL);
5267 cfg->exvars = g_hash_table_new (NULL, NULL);
5269 /* handle exception clauses */
5270 for (i = 0; i < header->num_clauses; ++i) {
5271 MonoBasicBlock *try_bb;
5272 MonoExceptionClause *clause = &header->clauses [i];
5273 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5274 try_bb->real_offset = clause->try_offset;
5275 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5276 tblock->real_offset = clause->handler_offset;
5277 tblock->flags |= BB_EXCEPTION_HANDLER;
5279 link_bblock (cfg, try_bb, tblock);
5281 if (*(ip + clause->handler_offset) == CEE_POP)
5282 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5284 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5285 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5286 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5287 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5288 MONO_ADD_INS (tblock, ins);
5290 /* todo: is a fault block unsafe to optimize? */
5291 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5292 tblock->flags |= BB_EXCEPTION_UNSAFE;
5296 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5298 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5300 /* catch and filter blocks get the exception object on the stack */
5301 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5302 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5303 MonoInst *dummy_use;
5305 /* mostly like handle_stack_args (), but just sets the input args */
5306 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5307 tblock->in_scount = 1;
5308 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5309 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5312 * Add a dummy use for the exvar so its liveness info will be
5316 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5318 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5319 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5320 tblock->flags |= BB_EXCEPTION_HANDLER;
5321 tblock->real_offset = clause->data.filter_offset;
5322 tblock->in_scount = 1;
5323 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5324 /* The filter block shares the exvar with the handler block */
5325 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5326 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5327 MONO_ADD_INS (tblock, ins);
5331 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5332 clause->data.catch_class &&
5333 cfg->generic_sharing_context &&
5334 mono_class_check_context_used (clause->data.catch_class)) {
5336 * In shared generic code with catch
5337 * clauses containing type variables
5338 * the exception handling code has to
5339 * be able to get to the rgctx.
5340 * Therefore we have to make sure that
5341 * the vtable/mrgctx argument (for
5342 * static or generic methods) or the
5343 * "this" argument (for non-static
5344 * methods) are live.
5346 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5347 mini_method_get_context (method)->method_inst ||
5348 method->klass->valuetype) {
5349 mono_get_vtable_var (cfg);
5351 MonoInst *dummy_use;
5353 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5358 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5359 cfg->cbb = start_bblock;
5360 cfg->args = arg_array;
5361 mono_save_args (cfg, sig, inline_args);
5364 /* FIRST CODE BLOCK */
5365 NEW_BBLOCK (cfg, bblock);
5366 bblock->cil_code = ip;
5370 ADD_BBLOCK (cfg, bblock);
5372 if (cfg->method == method) {
5373 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5374 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5375 MONO_INST_NEW (cfg, ins, OP_BREAK);
5376 MONO_ADD_INS (bblock, ins);
5380 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5381 secman = mono_security_manager_get_methods ();
5383 security = (secman && mono_method_has_declsec (method));
5384 /* at this point having security doesn't mean we have any code to generate */
5385 if (security && (cfg->method == method)) {
5386 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5387 * And we do not want to enter the next section (with allocation) if we
5388 * have nothing to generate */
5389 security = mono_declsec_get_demands (method, &actions);
5392 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5393 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5395 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5396 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5397 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5399 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5400 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5404 mono_custom_attrs_free (custom);
5407 custom = mono_custom_attrs_from_class (wrapped->klass);
5408 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5412 mono_custom_attrs_free (custom);
5415 /* not a P/Invoke after all */
5420 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5421 /* we use a separate basic block for the initialization code */
5422 NEW_BBLOCK (cfg, init_localsbb);
5423 cfg->bb_init = init_localsbb;
5424 init_localsbb->real_offset = cfg->real_offset;
5425 start_bblock->next_bb = init_localsbb;
5426 init_localsbb->next_bb = bblock;
5427 link_bblock (cfg, start_bblock, init_localsbb);
5428 link_bblock (cfg, init_localsbb, bblock);
5430 cfg->cbb = init_localsbb;
5432 start_bblock->next_bb = bblock;
5433 link_bblock (cfg, start_bblock, bblock);
5436 /* at this point we know, if security is TRUE, that some code needs to be generated */
5437 if (security && (cfg->method == method)) {
5440 mono_jit_stats.cas_demand_generation++;
5442 if (actions.demand.blob) {
5443 /* Add code for SecurityAction.Demand */
5444 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5445 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5446 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5447 mono_emit_method_call (cfg, secman->demand, args, NULL);
5449 if (actions.noncasdemand.blob) {
5450 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5451 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5452 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5453 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5454 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5455 mono_emit_method_call (cfg, secman->demand, args, NULL);
5457 if (actions.demandchoice.blob) {
5458 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5459 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5460 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5461 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5462 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5466 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5468 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5471 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5472 /* check if this is native code, e.g. an icall or a p/invoke */
5473 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5474 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5476 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5477 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5479 /* if this ia a native call then it can only be JITted from platform code */
5480 if ((icall || pinvk) && method->klass && method->klass->image) {
5481 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5482 MonoException *ex = icall ? mono_get_exception_security () :
5483 mono_get_exception_method_access ();
5484 emit_throw_exception (cfg, ex);
5491 if (header->code_size == 0)
5494 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5499 if (cfg->method == method)
5500 mono_debug_init_method (cfg, bblock, breakpoint_id);
5502 for (n = 0; n < header->num_locals; ++n) {
5503 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5508 /* We force the vtable variable here for all shared methods
5509 for the possibility that they might show up in a stack
5510 trace where their exact instantiation is needed. */
5511 if (cfg->generic_sharing_context && method == cfg->method) {
5512 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5513 mini_method_get_context (method)->method_inst ||
5514 method->klass->valuetype) {
5515 mono_get_vtable_var (cfg);
5517 /* FIXME: Is there a better way to do this?
5518 We need the variable live for the duration
5519 of the whole method. */
5520 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5524 /* add a check for this != NULL to inlined methods */
5525 if (is_virtual_call) {
5528 NEW_ARGLOAD (cfg, arg_ins, 0);
5529 MONO_ADD_INS (cfg->cbb, arg_ins);
5530 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5533 skip_dead_blocks = !dont_verify;
5534 if (skip_dead_blocks) {
5535 bb = mono_basic_block_split (method, &error);
5536 if (!mono_error_ok (&error)) {
5537 mono_error_cleanup (&error);
5543 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5544 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5547 start_new_bblock = 0;
5550 if (cfg->method == method)
5551 cfg->real_offset = ip - header->code;
5553 cfg->real_offset = inline_offset;
5558 if (start_new_bblock) {
5559 bblock->cil_length = ip - bblock->cil_code;
5560 if (start_new_bblock == 2) {
5561 g_assert (ip == tblock->cil_code);
5563 GET_BBLOCK (cfg, tblock, ip);
5565 bblock->next_bb = tblock;
5568 start_new_bblock = 0;
5569 for (i = 0; i < bblock->in_scount; ++i) {
5570 if (cfg->verbose_level > 3)
5571 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5572 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5576 g_slist_free (class_inits);
5579 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5580 link_bblock (cfg, bblock, tblock);
5581 if (sp != stack_start) {
5582 handle_stack_args (cfg, stack_start, sp - stack_start);
5584 CHECK_UNVERIFIABLE (cfg);
5586 bblock->next_bb = tblock;
5589 for (i = 0; i < bblock->in_scount; ++i) {
5590 if (cfg->verbose_level > 3)
5591 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5592 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5595 g_slist_free (class_inits);
5600 if (skip_dead_blocks) {
5601 int ip_offset = ip - header->code;
5603 if (ip_offset == bb->end)
5607 int op_size = mono_opcode_size (ip, end);
5608 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5610 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5612 if (ip_offset + op_size == bb->end) {
5613 MONO_INST_NEW (cfg, ins, OP_NOP);
5614 MONO_ADD_INS (bblock, ins);
5615 start_new_bblock = 1;
5623 * Sequence points are points where the debugger can place a breakpoint.
5624 * Currently, we generate these automatically at points where the IL
5627 if (seq_points && sp == stack_start) {
5628 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5629 MONO_ADD_INS (cfg->cbb, ins);
5632 bblock->real_offset = cfg->real_offset;
5634 if ((cfg->method == method) && cfg->coverage_info) {
5635 guint32 cil_offset = ip - header->code;
5636 cfg->coverage_info->data [cil_offset].cil_code = ip;
5638 /* TODO: Use an increment here */
5639 #if defined(TARGET_X86)
5640 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5641 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5643 MONO_ADD_INS (cfg->cbb, ins);
5645 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5646 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5650 if (cfg->verbose_level > 3)
5651 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5655 if (cfg->keep_cil_nops)
5656 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5658 MONO_INST_NEW (cfg, ins, OP_NOP);
5660 MONO_ADD_INS (bblock, ins);
5663 if (should_insert_brekpoint (cfg->method))
5664 MONO_INST_NEW (cfg, ins, OP_BREAK);
5666 MONO_INST_NEW (cfg, ins, OP_NOP);
5668 MONO_ADD_INS (bblock, ins);
5674 CHECK_STACK_OVF (1);
5675 n = (*ip)-CEE_LDARG_0;
5677 EMIT_NEW_ARGLOAD (cfg, ins, n);
5685 CHECK_STACK_OVF (1);
5686 n = (*ip)-CEE_LDLOC_0;
5688 EMIT_NEW_LOCLOAD (cfg, ins, n);
5697 n = (*ip)-CEE_STLOC_0;
5700 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5702 emit_stloc_ir (cfg, sp, header, n);
5709 CHECK_STACK_OVF (1);
5712 EMIT_NEW_ARGLOAD (cfg, ins, n);
5718 CHECK_STACK_OVF (1);
5721 NEW_ARGLOADA (cfg, ins, n);
5722 MONO_ADD_INS (cfg->cbb, ins);
5732 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5734 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5739 CHECK_STACK_OVF (1);
5742 EMIT_NEW_LOCLOAD (cfg, ins, n);
5746 case CEE_LDLOCA_S: {
5747 unsigned char *tmp_ip;
5749 CHECK_STACK_OVF (1);
5750 CHECK_LOCAL (ip [1]);
5752 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5758 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5767 CHECK_LOCAL (ip [1]);
5768 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5770 emit_stloc_ir (cfg, sp, header, ip [1]);
5775 CHECK_STACK_OVF (1);
5776 EMIT_NEW_PCONST (cfg, ins, NULL);
5777 ins->type = STACK_OBJ;
5782 CHECK_STACK_OVF (1);
5783 EMIT_NEW_ICONST (cfg, ins, -1);
5796 CHECK_STACK_OVF (1);
5797 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5803 CHECK_STACK_OVF (1);
5805 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5811 CHECK_STACK_OVF (1);
5812 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5818 CHECK_STACK_OVF (1);
5819 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5820 ins->type = STACK_I8;
5821 ins->dreg = alloc_dreg (cfg, STACK_I8);
5823 ins->inst_l = (gint64)read64 (ip);
5824 MONO_ADD_INS (bblock, ins);
5830 gboolean use_aotconst = FALSE;
5832 #ifdef TARGET_POWERPC
5833 /* FIXME: Clean this up */
5834 if (cfg->compile_aot)
5835 use_aotconst = TRUE;
5838 /* FIXME: we should really allocate this only late in the compilation process */
5839 f = mono_domain_alloc (cfg->domain, sizeof (float));
5841 CHECK_STACK_OVF (1);
5847 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5849 dreg = alloc_freg (cfg);
5850 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5851 ins->type = STACK_R8;
5853 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5854 ins->type = STACK_R8;
5855 ins->dreg = alloc_dreg (cfg, STACK_R8);
5857 MONO_ADD_INS (bblock, ins);
5867 gboolean use_aotconst = FALSE;
5869 #ifdef TARGET_POWERPC
5870 /* FIXME: Clean this up */
5871 if (cfg->compile_aot)
5872 use_aotconst = TRUE;
5875 /* FIXME: we should really allocate this only late in the compilation process */
5876 d = mono_domain_alloc (cfg->domain, sizeof (double));
5878 CHECK_STACK_OVF (1);
5884 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5886 dreg = alloc_freg (cfg);
5887 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5888 ins->type = STACK_R8;
5890 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5891 ins->type = STACK_R8;
5892 ins->dreg = alloc_dreg (cfg, STACK_R8);
5894 MONO_ADD_INS (bblock, ins);
5903 MonoInst *temp, *store;
5905 CHECK_STACK_OVF (1);
5909 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5910 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5912 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5915 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5928 if (sp [0]->type == STACK_R8)
5929 /* we need to pop the value from the x86 FP stack */
5930 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5939 if (stack_start != sp)
5941 token = read32 (ip + 1);
5942 /* FIXME: check the signature matches */
5943 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5948 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5949 GENERIC_SHARING_FAILURE (CEE_JMP);
5951 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5952 CHECK_CFG_EXCEPTION;
5954 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5956 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5959 /* Handle tail calls similarly to calls */
5960 n = fsig->param_count + fsig->hasthis;
5962 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5963 call->method = cmethod;
5964 call->tail_call = TRUE;
5965 call->signature = mono_method_signature (cmethod);
5966 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5967 call->inst.inst_p0 = cmethod;
5968 for (i = 0; i < n; ++i)
5969 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5971 mono_arch_emit_call (cfg, call);
5972 MONO_ADD_INS (bblock, (MonoInst*)call);
5975 for (i = 0; i < num_args; ++i)
5976 /* Prevent arguments from being optimized away */
5977 arg_array [i]->flags |= MONO_INST_VOLATILE;
5979 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5980 ins = (MonoInst*)call;
5981 ins->inst_p0 = cmethod;
5982 MONO_ADD_INS (bblock, ins);
5986 start_new_bblock = 1;
5991 case CEE_CALLVIRT: {
5992 MonoInst *addr = NULL;
5993 MonoMethodSignature *fsig = NULL;
5995 int virtual = *ip == CEE_CALLVIRT;
5996 int calli = *ip == CEE_CALLI;
5997 gboolean pass_imt_from_rgctx = FALSE;
5998 MonoInst *imt_arg = NULL;
5999 gboolean pass_vtable = FALSE;
6000 gboolean pass_mrgctx = FALSE;
6001 MonoInst *vtable_arg = NULL;
6002 gboolean check_this = FALSE;
6003 gboolean supported_tail_call = FALSE;
6006 token = read32 (ip + 1);
6013 if (method->wrapper_type != MONO_WRAPPER_NONE)
6014 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6016 fsig = mono_metadata_parse_signature (image, token);
6018 n = fsig->param_count + fsig->hasthis;
6020 if (method->dynamic && fsig->pinvoke) {
6024 * This is a call through a function pointer using a pinvoke
6025 * signature. Have to create a wrapper and call that instead.
6026 * FIXME: This is very slow, need to create a wrapper at JIT time
6027 * instead based on the signature.
6029 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6030 EMIT_NEW_PCONST (cfg, args [1], fsig);
6032 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6035 MonoMethod *cil_method;
6037 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6038 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6039 cil_method = cmethod;
6040 } else if (constrained_call) {
6041 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6043 * This is needed since get_method_constrained can't find
6044 * the method in klass representing a type var.
6045 * The type var is guaranteed to be a reference type in this
6048 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6049 cil_method = cmethod;
6050 g_assert (!cmethod->klass->valuetype);
6052 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6055 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6056 cil_method = cmethod;
6061 if (!dont_verify && !cfg->skip_visibility) {
6062 MonoMethod *target_method = cil_method;
6063 if (method->is_inflated) {
6064 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6066 if (!mono_method_can_access_method (method_definition, target_method) &&
6067 !mono_method_can_access_method (method, cil_method))
6068 METHOD_ACCESS_FAILURE;
6071 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6072 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6074 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6075 /* MS.NET seems to silently convert this to a callvirt */
6078 if (!cmethod->klass->inited)
6079 if (!mono_class_init (cmethod->klass))
6082 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6083 mini_class_is_system_array (cmethod->klass)) {
6084 array_rank = cmethod->klass->rank;
6085 fsig = mono_method_signature (cmethod);
6087 if (mono_method_signature (cmethod)->pinvoke) {
6088 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6089 check_for_pending_exc, FALSE);
6090 fsig = mono_method_signature (wrapper);
6091 } else if (constrained_call) {
6092 fsig = mono_method_signature (cmethod);
6094 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6098 mono_save_token_info (cfg, image, token, cil_method);
6100 n = fsig->param_count + fsig->hasthis;
6102 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6103 if (check_linkdemand (cfg, method, cmethod))
6105 CHECK_CFG_EXCEPTION;
6108 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6109 g_assert_not_reached ();
6112 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6115 if (!cfg->generic_sharing_context && cmethod)
6116 g_assert (!mono_method_check_context_used (cmethod));
6120 //g_assert (!virtual || fsig->hasthis);
6124 if (constrained_call) {
6126 * We have the `constrained.' prefix opcode.
6128 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6130 * The type parameter is instantiated as a valuetype,
6131 * but that type doesn't override the method we're
6132 * calling, so we need to box `this'.
6134 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6135 ins->klass = constrained_call;
6136 sp [0] = handle_box (cfg, ins, constrained_call);
6137 CHECK_CFG_EXCEPTION;
6138 } else if (!constrained_call->valuetype) {
6139 int dreg = alloc_preg (cfg);
6142 * The type parameter is instantiated as a reference
6143 * type. We have a managed pointer on the stack, so
6144 * we need to dereference it here.
6146 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6147 ins->type = STACK_OBJ;
6149 } else if (cmethod->klass->valuetype)
6151 constrained_call = NULL;
6154 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6158 * If the callee is a shared method, then its static cctor
6159 * might not get called after the call was patched.
6161 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6162 emit_generic_class_init (cfg, cmethod->klass);
6163 CHECK_TYPELOAD (cmethod->klass);
6166 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6167 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6168 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6169 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6170 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6173 * Pass vtable iff target method might
6174 * be shared, which means that sharing
6175 * is enabled for its class and its
6176 * context is sharable (and it's not a
6179 if (sharing_enabled && context_sharable &&
6180 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6184 if (cmethod && mini_method_get_context (cmethod) &&
6185 mini_method_get_context (cmethod)->method_inst) {
6186 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6187 MonoGenericContext *context = mini_method_get_context (cmethod);
6188 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6190 g_assert (!pass_vtable);
6192 if (sharing_enabled && context_sharable)
6196 if (cfg->generic_sharing_context && cmethod) {
6197 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6199 context_used = mono_method_check_context_used (cmethod);
6201 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6202 /* Generic method interface
6203 calls are resolved via a
6204 helper function and don't
6206 if (!cmethod_context || !cmethod_context->method_inst)
6207 pass_imt_from_rgctx = TRUE;
6211 * If a shared method calls another
6212 * shared method then the caller must
6213 * have a generic sharing context
6214 * because the magic trampoline
6215 * requires it. FIXME: We shouldn't
6216 * have to force the vtable/mrgctx
6217 * variable here. Instead there
6218 * should be a flag in the cfg to
6219 * request a generic sharing context.
6222 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6223 mono_get_vtable_var (cfg);
6228 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6230 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6232 CHECK_TYPELOAD (cmethod->klass);
6233 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6238 g_assert (!vtable_arg);
6240 if (!cfg->compile_aot) {
6242 * emit_get_rgctx_method () calls mono_class_vtable () so check
6243 * for type load errors before.
6245 mono_class_setup_vtable (cmethod->klass);
6246 CHECK_TYPELOAD (cmethod->klass);
6249 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6251 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6252 MONO_METHOD_IS_FINAL (cmethod)) {
6259 if (pass_imt_from_rgctx) {
6260 g_assert (!pass_vtable);
6263 imt_arg = emit_get_rgctx_method (cfg, context_used,
6264 cmethod, MONO_RGCTX_INFO_METHOD);
6268 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6270 /* Calling virtual generic methods */
6271 if (cmethod && virtual &&
6272 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6273 !(MONO_METHOD_IS_FINAL (cmethod) &&
6274 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6275 mono_method_signature (cmethod)->generic_param_count) {
6276 MonoInst *this_temp, *this_arg_temp, *store;
6277 MonoInst *iargs [4];
6279 g_assert (mono_method_signature (cmethod)->is_inflated);
6281 /* Prevent inlining of methods that contain indirect calls */
6284 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6285 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6286 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6287 g_assert (!imt_arg);
6289 g_assert (cmethod->is_inflated);
6290 imt_arg = emit_get_rgctx_method (cfg, context_used,
6291 cmethod, MONO_RGCTX_INFO_METHOD);
6292 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6296 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6297 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6298 MONO_ADD_INS (bblock, store);
6300 /* FIXME: This should be a managed pointer */
6301 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6303 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6304 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6305 cmethod, MONO_RGCTX_INFO_METHOD);
6306 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6307 addr = mono_emit_jit_icall (cfg,
6308 mono_helper_compile_generic_method, iargs);
6310 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6312 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6315 if (!MONO_TYPE_IS_VOID (fsig->ret))
6316 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6323 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6324 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6326 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6330 /* FIXME: runtime generic context pointer for jumps? */
6331 /* FIXME: handle this for generic sharing eventually */
6332 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6335 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6338 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6339 /* Handle tail calls similarly to calls */
6340 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6342 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6343 call->tail_call = TRUE;
6344 call->method = cmethod;
6345 call->signature = mono_method_signature (cmethod);
6348 * We implement tail calls by storing the actual arguments into the
6349 * argument variables, then emitting a CEE_JMP.
6351 for (i = 0; i < n; ++i) {
6352 /* Prevent argument from being register allocated */
6353 arg_array [i]->flags |= MONO_INST_VOLATILE;
6354 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6358 ins = (MonoInst*)call;
6359 ins->inst_p0 = cmethod;
6360 ins->inst_p1 = arg_array [0];
6361 MONO_ADD_INS (bblock, ins);
6362 link_bblock (cfg, bblock, end_bblock);
6363 start_new_bblock = 1;
6364 /* skip CEE_RET as well */
6370 /* Conversion to a JIT intrinsic */
6371 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6372 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6373 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6384 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6385 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6386 mono_method_check_inlining (cfg, cmethod) &&
6387 !g_list_find (dont_inline, cmethod)) {
6389 gboolean allways = FALSE;
6391 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6392 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6393 /* Prevent inlining of methods that call wrappers */
6395 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6399 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6401 cfg->real_offset += 5;
6404 if (!MONO_TYPE_IS_VOID (fsig->ret))
6405 /* *sp is already set by inline_method */
6408 inline_costs += costs;
6414 inline_costs += 10 * num_calls++;
6416 /* Tail recursion elimination */
6417 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6418 gboolean has_vtargs = FALSE;
6421 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6424 /* keep it simple */
6425 for (i = fsig->param_count - 1; i >= 0; i--) {
6426 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6431 for (i = 0; i < n; ++i)
6432 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6433 MONO_INST_NEW (cfg, ins, OP_BR);
6434 MONO_ADD_INS (bblock, ins);
6435 tblock = start_bblock->out_bb [0];
6436 link_bblock (cfg, bblock, tblock);
6437 ins->inst_target_bb = tblock;
6438 start_new_bblock = 1;
6440 /* skip the CEE_RET, too */
6441 if (ip_in_bb (cfg, bblock, ip + 5))
6451 /* Generic sharing */
6452 /* FIXME: only do this for generic methods if
6453 they are not shared! */
6454 if (context_used && !imt_arg && !array_rank &&
6455 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6456 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6457 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6458 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6461 g_assert (cfg->generic_sharing_context && cmethod);
6465 * We are compiling a call to a
6466 * generic method from shared code,
6467 * which means that we have to look up
6468 * the method in the rgctx and do an
6471 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6474 /* Indirect calls */
6476 g_assert (!imt_arg);
6478 if (*ip == CEE_CALL)
6479 g_assert (context_used);
6480 else if (*ip == CEE_CALLI)
6481 g_assert (!vtable_arg);
6483 /* FIXME: what the hell is this??? */
6484 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6485 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6487 /* Prevent inlining of methods with indirect calls */
6491 #ifdef MONO_ARCH_RGCTX_REG
6493 int rgctx_reg = mono_alloc_preg (cfg);
6495 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6496 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6497 call = (MonoCallInst*)ins;
6498 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6499 cfg->uses_rgctx_reg = TRUE;
6500 call->rgctx_reg = TRUE;
6505 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6507 * Instead of emitting an indirect call, emit a direct call
6508 * with the contents of the aotconst as the patch info.
6510 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6512 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6513 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6516 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6519 if (!MONO_TYPE_IS_VOID (fsig->ret))
6520 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6531 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6532 if (sp [fsig->param_count]->type == STACK_OBJ) {
6533 MonoInst *iargs [2];
6536 iargs [1] = sp [fsig->param_count];
6538 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6541 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6542 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6543 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6544 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6546 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6549 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6550 if (!cmethod->klass->element_class->valuetype && !readonly)
6551 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6552 CHECK_TYPELOAD (cmethod->klass);
6555 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6558 g_assert_not_reached ();
6566 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6568 if (!MONO_TYPE_IS_VOID (fsig->ret))
6569 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6579 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6581 } else if (imt_arg) {
6582 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6584 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6587 if (!MONO_TYPE_IS_VOID (fsig->ret))
6588 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6595 if (cfg->method != method) {
6596 /* return from inlined method */
6598 * If in_count == 0, that means the ret is unreachable due to
6599 * being preceded by a throw. In that case, inline_method () will
6600 * handle setting the return value
6601 * (test case: test_0_inline_throw ()).
6603 if (return_var && cfg->cbb->in_count) {
6607 //g_assert (returnvar != -1);
6608 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6609 cfg->ret_var_set = TRUE;
6613 MonoType *ret_type = mono_method_signature (method)->ret;
6617 * Place a seq point here too even though the IL stack is not
6618 * empty, so a step over on
6621 * will work correctly.
6623 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6624 MONO_ADD_INS (cfg->cbb, ins);
6627 g_assert (!return_var);
6630 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6633 if (!cfg->vret_addr) {
6636 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6638 EMIT_NEW_RETLOADA (cfg, ret_addr);
6640 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6641 ins->klass = mono_class_from_mono_type (ret_type);
6644 #ifdef MONO_ARCH_SOFT_FLOAT
6645 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6646 MonoInst *iargs [1];
6650 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6651 mono_arch_emit_setret (cfg, method, conv);
6653 mono_arch_emit_setret (cfg, method, *sp);
6656 mono_arch_emit_setret (cfg, method, *sp);
6661 if (sp != stack_start)
6663 MONO_INST_NEW (cfg, ins, OP_BR);
6665 ins->inst_target_bb = end_bblock;
6666 MONO_ADD_INS (bblock, ins);
6667 link_bblock (cfg, bblock, end_bblock);
6668 start_new_bblock = 1;
6672 MONO_INST_NEW (cfg, ins, OP_BR);
6674 target = ip + 1 + (signed char)(*ip);
6676 GET_BBLOCK (cfg, tblock, target);
6677 link_bblock (cfg, bblock, tblock);
6678 ins->inst_target_bb = tblock;
6679 if (sp != stack_start) {
6680 handle_stack_args (cfg, stack_start, sp - stack_start);
6682 CHECK_UNVERIFIABLE (cfg);
6684 MONO_ADD_INS (bblock, ins);
6685 start_new_bblock = 1;
6686 inline_costs += BRANCH_COST;
6700 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6702 target = ip + 1 + *(signed char*)ip;
6708 inline_costs += BRANCH_COST;
6712 MONO_INST_NEW (cfg, ins, OP_BR);
6715 target = ip + 4 + (gint32)read32(ip);
6717 GET_BBLOCK (cfg, tblock, target);
6718 link_bblock (cfg, bblock, tblock);
6719 ins->inst_target_bb = tblock;
6720 if (sp != stack_start) {
6721 handle_stack_args (cfg, stack_start, sp - stack_start);
6723 CHECK_UNVERIFIABLE (cfg);
6726 MONO_ADD_INS (bblock, ins);
6728 start_new_bblock = 1;
6729 inline_costs += BRANCH_COST;
6736 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6737 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6738 guint32 opsize = is_short ? 1 : 4;
6740 CHECK_OPSIZE (opsize);
6742 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6745 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6750 GET_BBLOCK (cfg, tblock, target);
6751 link_bblock (cfg, bblock, tblock);
6752 GET_BBLOCK (cfg, tblock, ip);
6753 link_bblock (cfg, bblock, tblock);
6755 if (sp != stack_start) {
6756 handle_stack_args (cfg, stack_start, sp - stack_start);
6757 CHECK_UNVERIFIABLE (cfg);
6760 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6761 cmp->sreg1 = sp [0]->dreg;
6762 type_from_op (cmp, sp [0], NULL);
6765 #if SIZEOF_REGISTER == 4
6766 if (cmp->opcode == OP_LCOMPARE_IMM) {
6767 /* Convert it to OP_LCOMPARE */
6768 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6769 ins->type = STACK_I8;
6770 ins->dreg = alloc_dreg (cfg, STACK_I8);
6772 MONO_ADD_INS (bblock, ins);
6773 cmp->opcode = OP_LCOMPARE;
6774 cmp->sreg2 = ins->dreg;
6777 MONO_ADD_INS (bblock, cmp);
6779 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6780 type_from_op (ins, sp [0], NULL);
6781 MONO_ADD_INS (bblock, ins);
6782 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6783 GET_BBLOCK (cfg, tblock, target);
6784 ins->inst_true_bb = tblock;
6785 GET_BBLOCK (cfg, tblock, ip);
6786 ins->inst_false_bb = tblock;
6787 start_new_bblock = 2;
6790 inline_costs += BRANCH_COST;
6805 MONO_INST_NEW (cfg, ins, *ip);
6807 target = ip + 4 + (gint32)read32(ip);
6813 inline_costs += BRANCH_COST;
6817 MonoBasicBlock **targets;
6818 MonoBasicBlock *default_bblock;
6819 MonoJumpInfoBBTable *table;
6820 int offset_reg = alloc_preg (cfg);
6821 int target_reg = alloc_preg (cfg);
6822 int table_reg = alloc_preg (cfg);
6823 int sum_reg = alloc_preg (cfg);
6824 gboolean use_op_switch;
6828 n = read32 (ip + 1);
6831 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6835 CHECK_OPSIZE (n * sizeof (guint32));
6836 target = ip + n * sizeof (guint32);
6838 GET_BBLOCK (cfg, default_bblock, target);
6840 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6841 for (i = 0; i < n; ++i) {
6842 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6843 targets [i] = tblock;
6847 if (sp != stack_start) {
6849 * Link the current bb with the targets as well, so handle_stack_args
6850 * will set their in_stack correctly.
6852 link_bblock (cfg, bblock, default_bblock);
6853 for (i = 0; i < n; ++i)
6854 link_bblock (cfg, bblock, targets [i]);
6856 handle_stack_args (cfg, stack_start, sp - stack_start);
6858 CHECK_UNVERIFIABLE (cfg);
6861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6865 for (i = 0; i < n; ++i)
6866 link_bblock (cfg, bblock, targets [i]);
6868 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6869 table->table = targets;
6870 table->table_size = n;
6872 use_op_switch = FALSE;
6874 /* ARM implements SWITCH statements differently */
6875 /* FIXME: Make it use the generic implementation */
6876 if (!cfg->compile_aot)
6877 use_op_switch = TRUE;
6880 if (COMPILE_LLVM (cfg))
6881 use_op_switch = TRUE;
6883 cfg->cbb->has_jump_table = 1;
6885 if (use_op_switch) {
6886 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6887 ins->sreg1 = src1->dreg;
6888 ins->inst_p0 = table;
6889 ins->inst_many_bb = targets;
6890 ins->klass = GUINT_TO_POINTER (n);
6891 MONO_ADD_INS (cfg->cbb, ins);
6893 if (sizeof (gpointer) == 8)
6894 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6898 #if SIZEOF_REGISTER == 8
6899 /* The upper word might not be zero, and we add it to a 64 bit address later */
6900 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6903 if (cfg->compile_aot) {
6904 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6906 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6907 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6908 ins->inst_p0 = table;
6909 ins->dreg = table_reg;
6910 MONO_ADD_INS (cfg->cbb, ins);
6913 /* FIXME: Use load_memindex */
6914 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6916 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6918 start_new_bblock = 1;
6919 inline_costs += (BRANCH_COST * 2);
6939 dreg = alloc_freg (cfg);
6942 dreg = alloc_lreg (cfg);
6945 dreg = alloc_preg (cfg);
6948 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6949 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6950 ins->flags |= ins_flag;
6952 MONO_ADD_INS (bblock, ins);
6967 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6968 ins->flags |= ins_flag;
6970 MONO_ADD_INS (bblock, ins);
6972 #if HAVE_WRITE_BARRIERS
6973 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6974 MonoInst *dummy_use;
6975 /* insert call to write barrier */
6976 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6977 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6978 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
6989 MONO_INST_NEW (cfg, ins, (*ip));
6991 ins->sreg1 = sp [0]->dreg;
6992 ins->sreg2 = sp [1]->dreg;
6993 type_from_op (ins, sp [0], sp [1]);
6995 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6997 /* Use the immediate opcodes if possible */
6998 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6999 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7000 if (imm_opcode != -1) {
7001 ins->opcode = imm_opcode;
7002 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7005 sp [1]->opcode = OP_NOP;
7009 MONO_ADD_INS ((cfg)->cbb, (ins));
7011 *sp++ = mono_decompose_opcode (cfg, ins);
7028 MONO_INST_NEW (cfg, ins, (*ip));
7030 ins->sreg1 = sp [0]->dreg;
7031 ins->sreg2 = sp [1]->dreg;
7032 type_from_op (ins, sp [0], sp [1]);
7034 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7035 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7037 /* FIXME: Pass opcode to is_inst_imm */
7039 /* Use the immediate opcodes if possible */
7040 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7043 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7044 if (imm_opcode != -1) {
7045 ins->opcode = imm_opcode;
7046 if (sp [1]->opcode == OP_I8CONST) {
7047 #if SIZEOF_REGISTER == 8
7048 ins->inst_imm = sp [1]->inst_l;
7050 ins->inst_ls_word = sp [1]->inst_ls_word;
7051 ins->inst_ms_word = sp [1]->inst_ms_word;
7055 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7058 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7059 if (sp [1]->next == NULL)
7060 sp [1]->opcode = OP_NOP;
7063 MONO_ADD_INS ((cfg)->cbb, (ins));
7065 *sp++ = mono_decompose_opcode (cfg, ins);
7078 case CEE_CONV_OVF_I8:
7079 case CEE_CONV_OVF_U8:
7083 /* Special case this earlier so we have long constants in the IR */
7084 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7085 int data = sp [-1]->inst_c0;
7086 sp [-1]->opcode = OP_I8CONST;
7087 sp [-1]->type = STACK_I8;
7088 #if SIZEOF_REGISTER == 8
7089 if ((*ip) == CEE_CONV_U8)
7090 sp [-1]->inst_c0 = (guint32)data;
7092 sp [-1]->inst_c0 = data;
7094 sp [-1]->inst_ls_word = data;
7095 if ((*ip) == CEE_CONV_U8)
7096 sp [-1]->inst_ms_word = 0;
7098 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7100 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7107 case CEE_CONV_OVF_I4:
7108 case CEE_CONV_OVF_I1:
7109 case CEE_CONV_OVF_I2:
7110 case CEE_CONV_OVF_I:
7111 case CEE_CONV_OVF_U:
7114 if (sp [-1]->type == STACK_R8) {
7115 ADD_UNOP (CEE_CONV_OVF_I8);
7122 case CEE_CONV_OVF_U1:
7123 case CEE_CONV_OVF_U2:
7124 case CEE_CONV_OVF_U4:
7127 if (sp [-1]->type == STACK_R8) {
7128 ADD_UNOP (CEE_CONV_OVF_U8);
7135 case CEE_CONV_OVF_I1_UN:
7136 case CEE_CONV_OVF_I2_UN:
7137 case CEE_CONV_OVF_I4_UN:
7138 case CEE_CONV_OVF_I8_UN:
7139 case CEE_CONV_OVF_U1_UN:
7140 case CEE_CONV_OVF_U2_UN:
7141 case CEE_CONV_OVF_U4_UN:
7142 case CEE_CONV_OVF_U8_UN:
7143 case CEE_CONV_OVF_I_UN:
7144 case CEE_CONV_OVF_U_UN:
7154 case CEE_ADD_OVF_UN:
7156 case CEE_MUL_OVF_UN:
7158 case CEE_SUB_OVF_UN:
7166 token = read32 (ip + 1);
7167 klass = mini_get_class (method, token, generic_context);
7168 CHECK_TYPELOAD (klass);
7170 if (generic_class_is_reference_type (cfg, klass)) {
7171 MonoInst *store, *load;
7172 int dreg = alloc_preg (cfg);
7174 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7175 load->flags |= ins_flag;
7176 MONO_ADD_INS (cfg->cbb, load);
7178 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7179 store->flags |= ins_flag;
7180 MONO_ADD_INS (cfg->cbb, store);
7182 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7194 token = read32 (ip + 1);
7195 klass = mini_get_class (method, token, generic_context);
7196 CHECK_TYPELOAD (klass);
7198 /* Optimize the common ldobj+stloc combination */
7208 loc_index = ip [5] - CEE_STLOC_0;
7215 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7216 CHECK_LOCAL (loc_index);
7218 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7219 ins->dreg = cfg->locals [loc_index]->dreg;
7225 /* Optimize the ldobj+stobj combination */
7226 /* The reference case ends up being a load+store anyway */
7227 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7232 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7239 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7248 CHECK_STACK_OVF (1);
7250 n = read32 (ip + 1);
7252 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7253 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7254 ins->type = STACK_OBJ;
7257 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7258 MonoInst *iargs [1];
7260 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7261 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7263 if (cfg->opt & MONO_OPT_SHARED) {
7264 MonoInst *iargs [3];
7266 if (cfg->compile_aot) {
7267 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7269 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7270 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7271 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7272 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7273 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7275 if (bblock->out_of_line) {
7276 MonoInst *iargs [2];
7278 if (image == mono_defaults.corlib) {
7280 * Avoid relocations in AOT and save some space by using a
7281 * version of helper_ldstr specialized to mscorlib.
7283 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7284 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7286 /* Avoid creating the string object */
7287 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7288 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7289 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7293 if (cfg->compile_aot) {
7294 NEW_LDSTRCONST (cfg, ins, image, n);
7296 MONO_ADD_INS (bblock, ins);
7299 NEW_PCONST (cfg, ins, NULL);
7300 ins->type = STACK_OBJ;
7301 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7303 MONO_ADD_INS (bblock, ins);
7312 MonoInst *iargs [2];
7313 MonoMethodSignature *fsig;
7316 MonoInst *vtable_arg = NULL;
7319 token = read32 (ip + 1);
7320 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7323 fsig = mono_method_get_signature (cmethod, image, token);
7327 mono_save_token_info (cfg, image, token, cmethod);
7329 if (!mono_class_init (cmethod->klass))
7332 if (cfg->generic_sharing_context)
7333 context_used = mono_method_check_context_used (cmethod);
7335 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7336 if (check_linkdemand (cfg, method, cmethod))
7338 CHECK_CFG_EXCEPTION;
7339 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7340 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7343 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7344 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7345 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7346 mono_class_vtable (cfg->domain, cmethod->klass);
7347 CHECK_TYPELOAD (cmethod->klass);
7349 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7350 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7353 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7354 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7356 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7358 CHECK_TYPELOAD (cmethod->klass);
7359 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7364 n = fsig->param_count;
7368 * Generate smaller code for the common newobj <exception> instruction in
7369 * argument checking code.
7371 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7372 is_exception_class (cmethod->klass) && n <= 2 &&
7373 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7374 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7375 MonoInst *iargs [3];
7377 g_assert (!vtable_arg);
7381 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7384 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7388 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7393 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7396 g_assert_not_reached ();
7404 /* move the args to allow room for 'this' in the first position */
7410 /* check_call_signature () requires sp[0] to be set */
7411 this_ins.type = STACK_OBJ;
7413 if (check_call_signature (cfg, fsig, sp))
7418 if (mini_class_is_system_array (cmethod->klass)) {
7419 g_assert (!vtable_arg);
7421 *sp = emit_get_rgctx_method (cfg, context_used,
7422 cmethod, MONO_RGCTX_INFO_METHOD);
7424 /* Avoid varargs in the common case */
7425 if (fsig->param_count == 1)
7426 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7427 else if (fsig->param_count == 2)
7428 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7429 else if (fsig->param_count == 3)
7430 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7432 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7433 } else if (cmethod->string_ctor) {
7434 g_assert (!context_used);
7435 g_assert (!vtable_arg);
7436 /* we simply pass a null pointer */
7437 EMIT_NEW_PCONST (cfg, *sp, NULL);
7438 /* now call the string ctor */
7439 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7441 MonoInst* callvirt_this_arg = NULL;
7443 if (cmethod->klass->valuetype) {
7444 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7445 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7446 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7451 * The code generated by mini_emit_virtual_call () expects
7452 * iargs [0] to be a boxed instance, but luckily the vcall
7453 * will be transformed into a normal call there.
7455 } else if (context_used) {
7459 if (cfg->opt & MONO_OPT_SHARED)
7460 rgctx_info = MONO_RGCTX_INFO_KLASS;
7462 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7463 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7465 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7468 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7470 CHECK_TYPELOAD (cmethod->klass);
7473 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7474 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7475 * As a workaround, we call class cctors before allocating objects.
7477 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7478 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7479 if (cfg->verbose_level > 2)
7480 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7481 class_inits = g_slist_prepend (class_inits, vtable);
7484 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7487 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7490 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7492 /* Now call the actual ctor */
7493 /* Avoid virtual calls to ctors if possible */
7494 if (cmethod->klass->marshalbyref)
7495 callvirt_this_arg = sp [0];
7497 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7498 mono_method_check_inlining (cfg, cmethod) &&
7499 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7500 !g_list_find (dont_inline, cmethod)) {
7503 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7504 cfg->real_offset += 5;
7507 inline_costs += costs - 5;
7510 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7512 } else if (context_used &&
7513 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7514 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7515 MonoInst *cmethod_addr;
7517 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7518 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7520 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7523 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7524 callvirt_this_arg, NULL, vtable_arg);
7528 if (alloc == NULL) {
7530 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7531 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7545 token = read32 (ip + 1);
7546 klass = mini_get_class (method, token, generic_context);
7547 CHECK_TYPELOAD (klass);
7548 if (sp [0]->type != STACK_OBJ)
7551 if (cfg->generic_sharing_context)
7552 context_used = mono_class_check_context_used (klass);
7554 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7561 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7563 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7567 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7568 MonoMethod *mono_castclass;
7569 MonoInst *iargs [1];
7572 mono_castclass = mono_marshal_get_castclass (klass);
7575 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7576 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7577 g_assert (costs > 0);
7580 cfg->real_offset += 5;
7585 inline_costs += costs;
7588 ins = handle_castclass (cfg, klass, *sp, context_used);
7589 CHECK_CFG_EXCEPTION;
7599 token = read32 (ip + 1);
7600 klass = mini_get_class (method, token, generic_context);
7601 CHECK_TYPELOAD (klass);
7602 if (sp [0]->type != STACK_OBJ)
7605 if (cfg->generic_sharing_context)
7606 context_used = mono_class_check_context_used (klass);
7608 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7615 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7617 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7621 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7622 MonoMethod *mono_isinst;
7623 MonoInst *iargs [1];
7626 mono_isinst = mono_marshal_get_isinst (klass);
7629 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7630 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7631 g_assert (costs > 0);
7634 cfg->real_offset += 5;
7639 inline_costs += costs;
7642 ins = handle_isinst (cfg, klass, *sp, context_used);
7643 CHECK_CFG_EXCEPTION;
7650 case CEE_UNBOX_ANY: {
7654 token = read32 (ip + 1);
7655 klass = mini_get_class (method, token, generic_context);
7656 CHECK_TYPELOAD (klass);
7658 mono_save_token_info (cfg, image, token, klass);
7660 if (cfg->generic_sharing_context)
7661 context_used = mono_class_check_context_used (klass);
7663 if (generic_class_is_reference_type (cfg, klass)) {
7664 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7666 MonoInst *iargs [2];
7671 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7672 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7676 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7677 MonoMethod *mono_castclass;
7678 MonoInst *iargs [1];
7681 mono_castclass = mono_marshal_get_castclass (klass);
7684 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7685 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7687 g_assert (costs > 0);
7690 cfg->real_offset += 5;
7694 inline_costs += costs;
7696 ins = handle_castclass (cfg, klass, *sp, 0);
7697 CHECK_CFG_EXCEPTION;
7705 if (mono_class_is_nullable (klass)) {
7706 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7713 ins = handle_unbox (cfg, klass, sp, context_used);
7719 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7732 token = read32 (ip + 1);
7733 klass = mini_get_class (method, token, generic_context);
7734 CHECK_TYPELOAD (klass);
7736 mono_save_token_info (cfg, image, token, klass);
7738 if (cfg->generic_sharing_context)
7739 context_used = mono_class_check_context_used (klass);
7741 if (generic_class_is_reference_type (cfg, klass)) {
7747 if (klass == mono_defaults.void_class)
7749 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7751 /* frequent check in generic code: box (struct), brtrue */
7752 if (!mono_class_is_nullable (klass) &&
7753 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7754 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7756 MONO_INST_NEW (cfg, ins, OP_BR);
7757 if (*ip == CEE_BRTRUE_S) {
7760 target = ip + 1 + (signed char)(*ip);
7765 target = ip + 4 + (gint)(read32 (ip));
7768 GET_BBLOCK (cfg, tblock, target);
7769 link_bblock (cfg, bblock, tblock);
7770 ins->inst_target_bb = tblock;
7771 GET_BBLOCK (cfg, tblock, ip);
7773 * This leads to some inconsistency, since the two bblocks are
7774 * not really connected, but it is needed for handling stack
7775 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7776 * FIXME: This should only be needed if sp != stack_start, but that
7777 * doesn't work for some reason (test failure in mcs/tests on x86).
7779 link_bblock (cfg, bblock, tblock);
7780 if (sp != stack_start) {
7781 handle_stack_args (cfg, stack_start, sp - stack_start);
7783 CHECK_UNVERIFIABLE (cfg);
7785 MONO_ADD_INS (bblock, ins);
7786 start_new_bblock = 1;
7794 if (cfg->opt & MONO_OPT_SHARED)
7795 rgctx_info = MONO_RGCTX_INFO_KLASS;
7797 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7798 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7799 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7801 *sp++ = handle_box (cfg, val, klass);
7804 CHECK_CFG_EXCEPTION;
7813 token = read32 (ip + 1);
7814 klass = mini_get_class (method, token, generic_context);
7815 CHECK_TYPELOAD (klass);
7817 mono_save_token_info (cfg, image, token, klass);
7819 if (cfg->generic_sharing_context)
7820 context_used = mono_class_check_context_used (klass);
7822 if (mono_class_is_nullable (klass)) {
7825 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7826 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7830 ins = handle_unbox (cfg, klass, sp, context_used);
7840 MonoClassField *field;
7844 if (*ip == CEE_STFLD) {
7851 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7853 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7856 token = read32 (ip + 1);
7857 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7858 field = mono_method_get_wrapper_data (method, token);
7859 klass = field->parent;
7862 field = mono_field_from_token (image, token, &klass, generic_context);
7866 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7867 FIELD_ACCESS_FAILURE;
7868 mono_class_init (klass);
/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7871 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7872 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7873 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7876 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7877 if (*ip == CEE_STFLD) {
7878 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7880 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7881 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7882 MonoInst *iargs [5];
7885 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7886 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7887 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7891 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7892 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7893 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7894 g_assert (costs > 0);
7896 cfg->real_offset += 5;
7899 inline_costs += costs;
7901 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7906 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7908 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7910 #if HAVE_WRITE_BARRIERS
7911 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7912 /* insert call to write barrier */
7913 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7914 MonoInst *iargs [2], *dummy_use;
7917 dreg = alloc_preg (cfg);
7918 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7920 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7922 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7926 store->flags |= ins_flag;
7933 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7934 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7935 MonoInst *iargs [4];
7938 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7939 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7940 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7941 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7942 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7943 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7945 g_assert (costs > 0);
7947 cfg->real_offset += 5;
7951 inline_costs += costs;
7953 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7957 if (sp [0]->type == STACK_VTYPE) {
7960 /* Have to compute the address of the variable */
7962 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7964 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7966 g_assert (var->klass == klass);
7968 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7972 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7974 if (*ip == CEE_LDFLDA) {
7975 dreg = alloc_preg (cfg);
7977 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7978 ins->klass = mono_class_from_mono_type (field->type);
7979 ins->type = STACK_MP;
7984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7985 load->flags |= ins_flag;
7986 load->flags |= MONO_INST_FAULT;
7997 MonoClassField *field;
7998 gpointer addr = NULL;
7999 gboolean is_special_static;
8002 token = read32 (ip + 1);
8004 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8005 field = mono_method_get_wrapper_data (method, token);
8006 klass = field->parent;
8009 field = mono_field_from_token (image, token, &klass, generic_context);
8012 mono_class_init (klass);
8013 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8014 FIELD_ACCESS_FAILURE;
8016 /* if the class is Critical then transparent code cannot access it's fields */
8017 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8018 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8021 * We can only support shared generic static
8022 * field access on architectures where the
8023 * trampoline code has been extended to handle
8024 * the generic class init.
8026 #ifndef MONO_ARCH_VTABLE_REG
8027 GENERIC_SHARING_FAILURE (*ip);
8030 if (cfg->generic_sharing_context)
8031 context_used = mono_class_check_context_used (klass);
8033 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8035 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8036 * to be called here.
8038 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8039 mono_class_vtable (cfg->domain, klass);
8040 CHECK_TYPELOAD (klass);
8042 mono_domain_lock (cfg->domain);
8043 if (cfg->domain->special_static_fields)
8044 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8045 mono_domain_unlock (cfg->domain);
8047 is_special_static = mono_class_field_is_special_static (field);
8049 /* Generate IR to compute the field address */
8050 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8052 * Fast access to TLS data
8053 * Inline version of get_thread_static_data () in
8057 int idx, static_data_reg, array_reg, dreg;
8058 MonoInst *thread_ins;
8060 // offset &= 0x7fffffff;
8061 // idx = (offset >> 24) - 1;
8062 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8064 thread_ins = mono_get_thread_intrinsic (cfg);
8065 MONO_ADD_INS (cfg->cbb, thread_ins);
8066 static_data_reg = alloc_ireg (cfg);
8067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8069 if (cfg->compile_aot) {
8070 int offset_reg, offset2_reg, idx_reg;
8072 /* For TLS variables, this will return the TLS offset */
8073 EMIT_NEW_SFLDACONST (cfg, ins, field);
8074 offset_reg = ins->dreg;
8075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8076 idx_reg = alloc_ireg (cfg);
8077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8081 array_reg = alloc_ireg (cfg);
8082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8083 offset2_reg = alloc_ireg (cfg);
8084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8085 dreg = alloc_ireg (cfg);
8086 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8088 offset = (gsize)addr & 0x7fffffff;
8089 idx = (offset >> 24) - 1;
8091 array_reg = alloc_ireg (cfg);
8092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8093 dreg = alloc_ireg (cfg);
8094 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8096 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8097 (cfg->compile_aot && is_special_static) ||
8098 (context_used && is_special_static)) {
8099 MonoInst *iargs [2];
8101 g_assert (field->parent);
8102 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8104 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8105 field, MONO_RGCTX_INFO_CLASS_FIELD);
8107 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8109 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8110 } else if (context_used) {
8111 MonoInst *static_data;
8114 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8115 method->klass->name_space, method->klass->name, method->name,
8116 depth, field->offset);
8119 if (mono_class_needs_cctor_run (klass, method)) {
8123 vtable = emit_get_rgctx_klass (cfg, context_used,
8124 klass, MONO_RGCTX_INFO_VTABLE);
8126 // FIXME: This doesn't work since it tries to pass the argument
8127 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8129 * The vtable pointer is always passed in a register regardless of
8130 * the calling convention, so assign it manually, and make a call
8131 * using a signature without parameters.
8133 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8134 #ifdef MONO_ARCH_VTABLE_REG
8135 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8136 cfg->uses_vtable_reg = TRUE;
8143 * The pointer we're computing here is
8145 * super_info.static_data + field->offset
8147 static_data = emit_get_rgctx_klass (cfg, context_used,
8148 klass, MONO_RGCTX_INFO_STATIC_DATA);
8150 if (field->offset == 0) {
8153 int addr_reg = mono_alloc_preg (cfg);
8154 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8156 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8157 MonoInst *iargs [2];
8159 g_assert (field->parent);
8160 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8161 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8162 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8164 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8166 CHECK_TYPELOAD (klass);
8168 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8169 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8170 if (cfg->verbose_level > 2)
8171 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8172 class_inits = g_slist_prepend (class_inits, vtable);
8174 if (cfg->run_cctors) {
8176 /* This makes so that inline cannot trigger */
8177 /* .cctors: too many apps depend on them */
8178 /* running with a specific order... */
8179 if (! vtable->initialized)
8181 ex = mono_runtime_class_init_full (vtable, FALSE);
8183 set_exception_object (cfg, ex);
8184 goto exception_exit;
8188 addr = (char*)vtable->data + field->offset;
8190 if (cfg->compile_aot)
8191 EMIT_NEW_SFLDACONST (cfg, ins, field);
8193 EMIT_NEW_PCONST (cfg, ins, addr);
8195 MonoInst *iargs [1];
8196 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8197 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8201 /* Generate IR to do the actual load/store operation */
8203 if (*ip == CEE_LDSFLDA) {
8204 ins->klass = mono_class_from_mono_type (field->type);
8205 ins->type = STACK_PTR;
8207 } else if (*ip == CEE_STSFLD) {
8212 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8213 store->flags |= ins_flag;
8215 gboolean is_const = FALSE;
8216 MonoVTable *vtable = NULL;
8218 if (!context_used) {
8219 vtable = mono_class_vtable (cfg->domain, klass);
8220 CHECK_TYPELOAD (klass);
8222 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8223 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8224 gpointer addr = (char*)vtable->data + field->offset;
8225 int ro_type = field->type->type;
8226 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8227 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8229 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8232 case MONO_TYPE_BOOLEAN:
8234 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8238 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8241 case MONO_TYPE_CHAR:
8243 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8247 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8252 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8256 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8259 #ifndef HAVE_MOVING_COLLECTOR
8262 case MONO_TYPE_STRING:
8263 case MONO_TYPE_OBJECT:
8264 case MONO_TYPE_CLASS:
8265 case MONO_TYPE_SZARRAY:
8267 case MONO_TYPE_FNPTR:
8268 case MONO_TYPE_ARRAY:
8269 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8270 type_to_eval_stack_type ((cfg), field->type, *sp);
8276 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8281 case MONO_TYPE_VALUETYPE:
8291 CHECK_STACK_OVF (1);
8293 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8294 load->flags |= ins_flag;
8307 token = read32 (ip + 1);
8308 klass = mini_get_class (method, token, generic_context);
8309 CHECK_TYPELOAD (klass);
8310 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8311 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8322 const char *data_ptr;
8324 guint32 field_token;
8330 token = read32 (ip + 1);
8332 klass = mini_get_class (method, token, generic_context);
8333 CHECK_TYPELOAD (klass);
8335 if (cfg->generic_sharing_context)
8336 context_used = mono_class_check_context_used (klass);
8338 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8339 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8340 ins->sreg1 = sp [0]->dreg;
8341 ins->type = STACK_I4;
8342 ins->dreg = alloc_ireg (cfg);
8343 MONO_ADD_INS (cfg->cbb, ins);
8344 *sp = mono_decompose_opcode (cfg, ins);
8349 MonoClass *array_class = mono_array_class_get (klass, 1);
8350 /* FIXME: we cannot get a managed
8351 allocator because we can't get the
8352 open generic class's vtable. We
8353 have the same problem in
8354 handle_alloc_from_inst(). This
8355 needs to be solved so that we can
8356 have managed allocs of shared
8359 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8360 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8362 MonoMethod *managed_alloc = NULL;
8364 /* FIXME: Decompose later to help abcrem */
8367 args [0] = emit_get_rgctx_klass (cfg, context_used,
8368 array_class, MONO_RGCTX_INFO_VTABLE);
8373 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8375 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8377 if (cfg->opt & MONO_OPT_SHARED) {
8378 /* Decompose now to avoid problems with references to the domainvar */
8379 MonoInst *iargs [3];
8381 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8382 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8385 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8387 /* Decompose later since it is needed by abcrem */
8388 MonoClass *array_type = mono_array_class_get (klass, 1);
8389 mono_class_vtable (cfg->domain, array_type);
8390 CHECK_TYPELOAD (array_type);
8392 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8393 ins->dreg = alloc_preg (cfg);
8394 ins->sreg1 = sp [0]->dreg;
8395 ins->inst_newa_class = klass;
8396 ins->type = STACK_OBJ;
8398 MONO_ADD_INS (cfg->cbb, ins);
8399 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8400 cfg->cbb->has_array_access = TRUE;
8402 /* Needed so mono_emit_load_get_addr () gets called */
8403 mono_get_got_var (cfg);
8413 * we inline/optimize the initialization sequence if possible.
8414 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8415 * for small sizes open code the memcpy
8416 * ensure the rva field is big enough
8418 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8419 MonoMethod *memcpy_method = get_memcpy_method ();
8420 MonoInst *iargs [3];
8421 int add_reg = alloc_preg (cfg);
8423 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8424 if (cfg->compile_aot) {
8425 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8427 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8429 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8430 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8439 if (sp [0]->type != STACK_OBJ)
8442 dreg = alloc_preg (cfg);
8443 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8444 ins->dreg = alloc_preg (cfg);
8445 ins->sreg1 = sp [0]->dreg;
8446 ins->type = STACK_I4;
8447 MONO_ADD_INS (cfg->cbb, ins);
8448 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8449 cfg->cbb->has_array_access = TRUE;
8457 if (sp [0]->type != STACK_OBJ)
8460 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8462 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8463 CHECK_TYPELOAD (klass);
8464 /* we need to make sure that this array is exactly the type it needs
8465 * to be for correctness. the wrappers are lax with their usage
8466 * so we need to ignore them here
8468 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8469 MonoClass *array_class = mono_array_class_get (klass, 1);
8470 mini_emit_check_array_type (cfg, sp [0], array_class);
8471 CHECK_TYPELOAD (array_class);
8475 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8490 case CEE_LDELEM_REF: {
8496 if (*ip == CEE_LDELEM) {
8498 token = read32 (ip + 1);
8499 klass = mini_get_class (method, token, generic_context);
8500 CHECK_TYPELOAD (klass);
8501 mono_class_init (klass);
8504 klass = array_access_to_klass (*ip);
8506 if (sp [0]->type != STACK_OBJ)
8509 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8511 if (sp [1]->opcode == OP_ICONST) {
8512 int array_reg = sp [0]->dreg;
8513 int index_reg = sp [1]->dreg;
8514 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8516 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8517 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8519 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8520 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8523 if (*ip == CEE_LDELEM)
8536 case CEE_STELEM_REF:
8543 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8545 if (*ip == CEE_STELEM) {
8547 token = read32 (ip + 1);
8548 klass = mini_get_class (method, token, generic_context);
8549 CHECK_TYPELOAD (klass);
8550 mono_class_init (klass);
8553 klass = array_access_to_klass (*ip);
8555 if (sp [0]->type != STACK_OBJ)
8558 /* storing a NULL doesn't need any of the complex checks in stelemref */
8559 if (generic_class_is_reference_type (cfg, klass) &&
8560 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8561 MonoMethod* helper = mono_marshal_get_stelemref ();
8562 MonoInst *iargs [3];
8564 if (sp [0]->type != STACK_OBJ)
8566 if (sp [2]->type != STACK_OBJ)
8573 mono_emit_method_call (cfg, helper, iargs, NULL);
8575 if (sp [1]->opcode == OP_ICONST) {
8576 int array_reg = sp [0]->dreg;
8577 int index_reg = sp [1]->dreg;
8578 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8580 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8581 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8583 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8584 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8588 if (*ip == CEE_STELEM)
8595 case CEE_CKFINITE: {
8599 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8600 ins->sreg1 = sp [0]->dreg;
8601 ins->dreg = alloc_freg (cfg);
8602 ins->type = STACK_R8;
8603 MONO_ADD_INS (bblock, ins);
8605 *sp++ = mono_decompose_opcode (cfg, ins);
8610 case CEE_REFANYVAL: {
8611 MonoInst *src_var, *src;
8613 int klass_reg = alloc_preg (cfg);
8614 int dreg = alloc_preg (cfg);
8617 MONO_INST_NEW (cfg, ins, *ip);
8620 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8621 CHECK_TYPELOAD (klass);
8622 mono_class_init (klass);
8624 if (cfg->generic_sharing_context)
8625 context_used = mono_class_check_context_used (klass);
8628 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8630 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8631 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8635 MonoInst *klass_ins;
8637 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8638 klass, MONO_RGCTX_INFO_KLASS);
8641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8642 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8644 mini_emit_class_check (cfg, klass_reg, klass);
8646 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8647 ins->type = STACK_MP;
8652 case CEE_MKREFANY: {
8653 MonoInst *loc, *addr;
8656 MONO_INST_NEW (cfg, ins, *ip);
8659 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8660 CHECK_TYPELOAD (klass);
8661 mono_class_init (klass);
8663 if (cfg->generic_sharing_context)
8664 context_used = mono_class_check_context_used (klass);
8666 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8667 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8670 MonoInst *const_ins;
8671 int type_reg = alloc_preg (cfg);
8673 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8677 } else if (cfg->compile_aot) {
8678 int const_reg = alloc_preg (cfg);
8679 int type_reg = alloc_preg (cfg);
8681 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8686 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8687 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8691 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8692 ins->type = STACK_VTYPE;
8693 ins->klass = mono_defaults.typed_reference_class;
8700 MonoClass *handle_class;
8702 CHECK_STACK_OVF (1);
8705 n = read32 (ip + 1);
8707 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8708 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8709 handle = mono_method_get_wrapper_data (method, n);
8710 handle_class = mono_method_get_wrapper_data (method, n + 1);
8711 if (handle_class == mono_defaults.typehandle_class)
8712 handle = &((MonoClass*)handle)->byval_arg;
8715 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8719 mono_class_init (handle_class);
8720 if (cfg->generic_sharing_context) {
8721 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8722 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8723 /* This case handles ldtoken
8724 of an open type, like for
8727 } else if (handle_class == mono_defaults.typehandle_class) {
8728 /* If we get a MONO_TYPE_CLASS
8729 then we need to provide the
8731 instantiation of it. */
8732 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8735 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8736 } else if (handle_class == mono_defaults.fieldhandle_class)
8737 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8738 else if (handle_class == mono_defaults.methodhandle_class)
8739 context_used = mono_method_check_context_used (handle);
8741 g_assert_not_reached ();
8744 if ((cfg->opt & MONO_OPT_SHARED) &&
8745 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8746 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8747 MonoInst *addr, *vtvar, *iargs [3];
8748 int method_context_used;
8750 if (cfg->generic_sharing_context)
8751 method_context_used = mono_method_check_context_used (method);
8753 method_context_used = 0;
8755 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8757 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8758 EMIT_NEW_ICONST (cfg, iargs [1], n);
8759 if (method_context_used) {
8760 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8761 method, MONO_RGCTX_INFO_METHOD);
8762 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8764 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8765 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8767 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8771 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8773 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8774 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8775 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8776 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8777 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8778 MonoClass *tclass = mono_class_from_mono_type (handle);
8780 mono_class_init (tclass);
8782 ins = emit_get_rgctx_klass (cfg, context_used,
8783 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8784 } else if (cfg->compile_aot) {
8785 if (method->wrapper_type) {
8786 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8787 /* Special case for static synchronized wrappers */
8788 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8790 /* FIXME: n is not a normal token */
8791 cfg->disable_aot = TRUE;
8792 EMIT_NEW_PCONST (cfg, ins, NULL);
8795 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8798 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8800 ins->type = STACK_OBJ;
8801 ins->klass = cmethod->klass;
8804 MonoInst *addr, *vtvar;
8806 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8809 if (handle_class == mono_defaults.typehandle_class) {
8810 ins = emit_get_rgctx_klass (cfg, context_used,
8811 mono_class_from_mono_type (handle),
8812 MONO_RGCTX_INFO_TYPE);
8813 } else if (handle_class == mono_defaults.methodhandle_class) {
8814 ins = emit_get_rgctx_method (cfg, context_used,
8815 handle, MONO_RGCTX_INFO_METHOD);
8816 } else if (handle_class == mono_defaults.fieldhandle_class) {
8817 ins = emit_get_rgctx_field (cfg, context_used,
8818 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8820 g_assert_not_reached ();
8822 } else if (cfg->compile_aot) {
8823 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8825 EMIT_NEW_PCONST (cfg, ins, handle);
8827 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8829 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8839 MONO_INST_NEW (cfg, ins, OP_THROW);
8841 ins->sreg1 = sp [0]->dreg;
8843 bblock->out_of_line = TRUE;
8844 MONO_ADD_INS (bblock, ins);
8845 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8846 MONO_ADD_INS (bblock, ins);
8849 link_bblock (cfg, bblock, end_bblock);
8850 start_new_bblock = 1;
8852 case CEE_ENDFINALLY:
8853 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8854 MONO_ADD_INS (bblock, ins);
8856 start_new_bblock = 1;
8859 * Control will leave the method so empty the stack, otherwise
8860 * the next basic block will start with a nonempty stack.
8862 while (sp != stack_start) {
8870 if (*ip == CEE_LEAVE) {
8872 target = ip + 5 + (gint32)read32(ip + 1);
8875 target = ip + 2 + (signed char)(ip [1]);
8878 /* empty the stack */
8879 while (sp != stack_start) {
8884 * If this leave statement is in a catch block, check for a
8885 * pending exception, and rethrow it if necessary.
8886 * We avoid doing this in runtime invoke wrappers, since those are called
8887 * by native code which expects the wrapper to catch all exceptions.
8889 for (i = 0; i < header->num_clauses; ++i) {
8890 MonoExceptionClause *clause = &header->clauses [i];
8893 * Use <= in the final comparison to handle clauses with multiple
8894 * leave statements, like in bug #78024.
8895 * The ordering of the exception clauses guarantees that we find the
8898 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8900 MonoBasicBlock *dont_throw;
8905 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8908 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8910 NEW_BBLOCK (cfg, dont_throw);
8913 * Currently, we always rethrow the abort exception, despite the
8914 * fact that this is not correct. See thread6.cs for an example.
8915 * But propagating the abort exception is more important than
8916 * getting the semantics right.
8918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8919 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8920 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8922 MONO_START_BB (cfg, dont_throw);
8927 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8929 for (tmp = handlers; tmp; tmp = tmp->next) {
8931 link_bblock (cfg, bblock, tblock);
8932 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8933 ins->inst_target_bb = tblock;
8934 MONO_ADD_INS (bblock, ins);
8935 bblock->has_call_handler = 1;
8936 if (COMPILE_LLVM (cfg)) {
8937 MonoBasicBlock *target_bb;
8940 * Link the finally bblock with the target, since it will
8941 * conceptually branch there.
8942 * FIXME: Have to link the bblock containing the endfinally.
8944 GET_BBLOCK (cfg, target_bb, target);
8945 link_bblock (cfg, tblock, target_bb);
8948 g_list_free (handlers);
8951 MONO_INST_NEW (cfg, ins, OP_BR);
8952 MONO_ADD_INS (bblock, ins);
8953 GET_BBLOCK (cfg, tblock, target);
8954 link_bblock (cfg, bblock, tblock);
8955 ins->inst_target_bb = tblock;
8956 start_new_bblock = 1;
8958 if (*ip == CEE_LEAVE)
8967 * Mono specific opcodes
8969 case MONO_CUSTOM_PREFIX: {
8971 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8975 case CEE_MONO_ICALL: {
8977 MonoJitICallInfo *info;
8979 token = read32 (ip + 2);
8980 func = mono_method_get_wrapper_data (method, token);
8981 info = mono_find_jit_icall_by_addr (func);
8984 CHECK_STACK (info->sig->param_count);
8985 sp -= info->sig->param_count;
8987 ins = mono_emit_jit_icall (cfg, info->func, sp);
8988 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8992 inline_costs += 10 * num_calls++;
8996 case CEE_MONO_LDPTR: {
8999 CHECK_STACK_OVF (1);
9001 token = read32 (ip + 2);
9003 ptr = mono_method_get_wrapper_data (method, token);
9004 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9005 MonoJitICallInfo *callinfo;
9006 const char *icall_name;
9008 icall_name = method->name + strlen ("__icall_wrapper_");
9009 g_assert (icall_name);
9010 callinfo = mono_find_jit_icall_by_name (icall_name);
9011 g_assert (callinfo);
9013 if (ptr == callinfo->func) {
9014 /* Will be transformed into an AOTCONST later */
9015 EMIT_NEW_PCONST (cfg, ins, ptr);
9021 /* FIXME: Generalize this */
9022 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9023 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9028 EMIT_NEW_PCONST (cfg, ins, ptr);
9031 inline_costs += 10 * num_calls++;
9032 /* Can't embed random pointers into AOT code */
9033 cfg->disable_aot = 1;
9036 case CEE_MONO_ICALL_ADDR: {
9037 MonoMethod *cmethod;
9040 CHECK_STACK_OVF (1);
9042 token = read32 (ip + 2);
9044 cmethod = mono_method_get_wrapper_data (method, token);
9046 if (cfg->compile_aot) {
9047 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9049 ptr = mono_lookup_internal_call (cmethod);
9051 EMIT_NEW_PCONST (cfg, ins, ptr);
9057 case CEE_MONO_VTADDR: {
9058 MonoInst *src_var, *src;
9064 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9065 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9070 case CEE_MONO_NEWOBJ: {
9071 MonoInst *iargs [2];
9073 CHECK_STACK_OVF (1);
9075 token = read32 (ip + 2);
9076 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9077 mono_class_init (klass);
9078 NEW_DOMAINCONST (cfg, iargs [0]);
9079 MONO_ADD_INS (cfg->cbb, iargs [0]);
9080 NEW_CLASSCONST (cfg, iargs [1], klass);
9081 MONO_ADD_INS (cfg->cbb, iargs [1]);
9082 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9084 inline_costs += 10 * num_calls++;
9087 case CEE_MONO_OBJADDR:
9090 MONO_INST_NEW (cfg, ins, OP_MOVE);
9091 ins->dreg = alloc_preg (cfg);
9092 ins->sreg1 = sp [0]->dreg;
9093 ins->type = STACK_MP;
9094 MONO_ADD_INS (cfg->cbb, ins);
9098 case CEE_MONO_LDNATIVEOBJ:
9100 * Similar to LDOBJ, but instead load the unmanaged
9101 * representation of the vtype to the stack.
9106 token = read32 (ip + 2);
9107 klass = mono_method_get_wrapper_data (method, token);
9108 g_assert (klass->valuetype);
9109 mono_class_init (klass);
9112 MonoInst *src, *dest, *temp;
9115 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9116 temp->backend.is_pinvoke = 1;
9117 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9118 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9120 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9121 dest->type = STACK_VTYPE;
9122 dest->klass = klass;
9128 case CEE_MONO_RETOBJ: {
9130 * Same as RET, but return the native representation of a vtype
9133 g_assert (cfg->ret);
9134 g_assert (mono_method_signature (method)->pinvoke);
9139 token = read32 (ip + 2);
9140 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9142 if (!cfg->vret_addr) {
9143 g_assert (cfg->ret_var_is_local);
9145 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9147 EMIT_NEW_RETLOADA (cfg, ins);
9149 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9151 if (sp != stack_start)
9154 MONO_INST_NEW (cfg, ins, OP_BR);
9155 ins->inst_target_bb = end_bblock;
9156 MONO_ADD_INS (bblock, ins);
9157 link_bblock (cfg, bblock, end_bblock);
9158 start_new_bblock = 1;
9162 case CEE_MONO_CISINST:
9163 case CEE_MONO_CCASTCLASS: {
9168 token = read32 (ip + 2);
9169 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9170 if (ip [1] == CEE_MONO_CISINST)
9171 ins = handle_cisinst (cfg, klass, sp [0]);
9173 ins = handle_ccastclass (cfg, klass, sp [0]);
9179 case CEE_MONO_SAVE_LMF:
9180 case CEE_MONO_RESTORE_LMF:
9181 #ifdef MONO_ARCH_HAVE_LMF_OPS
9182 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9183 MONO_ADD_INS (bblock, ins);
9184 cfg->need_lmf_area = TRUE;
9188 case CEE_MONO_CLASSCONST:
9189 CHECK_STACK_OVF (1);
9191 token = read32 (ip + 2);
9192 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9195 inline_costs += 10 * num_calls++;
9197 case CEE_MONO_NOT_TAKEN:
9198 bblock->out_of_line = TRUE;
9202 CHECK_STACK_OVF (1);
9204 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9205 ins->dreg = alloc_preg (cfg);
9206 ins->inst_offset = (gint32)read32 (ip + 2);
9207 ins->type = STACK_PTR;
9208 MONO_ADD_INS (bblock, ins);
9212 case CEE_MONO_DYN_CALL: {
9215 /* It would be easier to call a trampoline, but that would put an
9216 * extra frame on the stack, confusing exception handling. So
9217 * implement it inline using an opcode for now.
9220 if (!cfg->dyn_call_var) {
9221 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9222 /* prevent it from being register allocated */
9223 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9226 /* Has to use a call inst since local regalloc expects it */
9227 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9228 ins = (MonoInst*)call;
9230 ins->sreg1 = sp [0]->dreg;
9231 ins->sreg2 = sp [1]->dreg;
9232 MONO_ADD_INS (bblock, ins);
9234 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9235 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9239 inline_costs += 10 * num_calls++;
9244 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9254 /* somewhat similar to LDTOKEN */
9255 MonoInst *addr, *vtvar;
9256 CHECK_STACK_OVF (1);
9257 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9259 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9260 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9262 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9263 ins->type = STACK_VTYPE;
9264 ins->klass = mono_defaults.argumenthandle_class;
9277 * The following transforms:
9278 * CEE_CEQ into OP_CEQ
9279 * CEE_CGT into OP_CGT
9280 * CEE_CGT_UN into OP_CGT_UN
9281 * CEE_CLT into OP_CLT
9282 * CEE_CLT_UN into OP_CLT_UN
9284 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9286 MONO_INST_NEW (cfg, ins, cmp->opcode);
9288 cmp->sreg1 = sp [0]->dreg;
9289 cmp->sreg2 = sp [1]->dreg;
9290 type_from_op (cmp, sp [0], sp [1]);
9292 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9293 cmp->opcode = OP_LCOMPARE;
9294 else if (sp [0]->type == STACK_R8)
9295 cmp->opcode = OP_FCOMPARE;
9297 cmp->opcode = OP_ICOMPARE;
9298 MONO_ADD_INS (bblock, cmp);
9299 ins->type = STACK_I4;
9300 ins->dreg = alloc_dreg (cfg, ins->type);
9301 type_from_op (ins, sp [0], sp [1]);
9303 if (cmp->opcode == OP_FCOMPARE) {
9305 * The backends expect the fceq opcodes to do the
9308 cmp->opcode = OP_NOP;
9309 ins->sreg1 = cmp->sreg1;
9310 ins->sreg2 = cmp->sreg2;
9312 MONO_ADD_INS (bblock, ins);
9319 MonoMethod *cil_method;
9320 gboolean needs_static_rgctx_invoke;
9322 CHECK_STACK_OVF (1);
9324 n = read32 (ip + 2);
9325 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9328 mono_class_init (cmethod->klass);
9330 mono_save_token_info (cfg, image, n, cmethod);
9332 if (cfg->generic_sharing_context)
9333 context_used = mono_method_check_context_used (cmethod);
9335 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9337 cil_method = cmethod;
9338 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9339 METHOD_ACCESS_FAILURE;
9341 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9342 if (check_linkdemand (cfg, method, cmethod))
9344 CHECK_CFG_EXCEPTION;
9345 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9346 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9350 * Optimize the common case of ldftn+delegate creation
9352 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9353 /* FIXME: SGEN support */
9354 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9355 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9356 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9357 MonoInst *target_ins;
9359 int invoke_context_used = 0;
9361 invoke = mono_get_delegate_invoke (ctor_method->klass);
9362 if (!invoke || !mono_method_signature (invoke))
9365 if (cfg->generic_sharing_context)
9366 invoke_context_used = mono_method_check_context_used (invoke);
9368 if (invoke_context_used == 0) {
9370 if (cfg->verbose_level > 3)
9371 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9372 target_ins = sp [-1];
9374 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9375 CHECK_CFG_EXCEPTION;
9384 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9385 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9389 inline_costs += 10 * num_calls++;
9392 case CEE_LDVIRTFTN: {
9397 n = read32 (ip + 2);
9398 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9401 mono_class_init (cmethod->klass);
9403 if (cfg->generic_sharing_context)
9404 context_used = mono_method_check_context_used (cmethod);
9406 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9407 if (check_linkdemand (cfg, method, cmethod))
9409 CHECK_CFG_EXCEPTION;
9410 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9411 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9417 args [1] = emit_get_rgctx_method (cfg, context_used,
9418 cmethod, MONO_RGCTX_INFO_METHOD);
9421 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9423 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9426 inline_costs += 10 * num_calls++;
9430 CHECK_STACK_OVF (1);
9432 n = read16 (ip + 2);
9434 EMIT_NEW_ARGLOAD (cfg, ins, n);
9439 CHECK_STACK_OVF (1);
9441 n = read16 (ip + 2);
9443 NEW_ARGLOADA (cfg, ins, n);
9444 MONO_ADD_INS (cfg->cbb, ins);
9452 n = read16 (ip + 2);
9454 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9456 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9460 CHECK_STACK_OVF (1);
9462 n = read16 (ip + 2);
9464 EMIT_NEW_LOCLOAD (cfg, ins, n);
9469 unsigned char *tmp_ip;
9470 CHECK_STACK_OVF (1);
9472 n = read16 (ip + 2);
9475 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9481 EMIT_NEW_LOCLOADA (cfg, ins, n);
9490 n = read16 (ip + 2);
9492 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9494 emit_stloc_ir (cfg, sp, header, n);
9501 if (sp != stack_start)
9503 if (cfg->method != method)
9505 * Inlining this into a loop in a parent could lead to
9506 * stack overflows which is different behavior than the
9507 * non-inlined case, thus disable inlining in this case.
9509 goto inline_failure;
9511 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9512 ins->dreg = alloc_preg (cfg);
9513 ins->sreg1 = sp [0]->dreg;
9514 ins->type = STACK_PTR;
9515 MONO_ADD_INS (cfg->cbb, ins);
9517 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9519 ins->flags |= MONO_INST_INIT;
9524 case CEE_ENDFILTER: {
9525 MonoExceptionClause *clause, *nearest;
9526 int cc, nearest_num;
9530 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9532 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9533 ins->sreg1 = (*sp)->dreg;
9534 MONO_ADD_INS (bblock, ins);
9535 start_new_bblock = 1;
9540 for (cc = 0; cc < header->num_clauses; ++cc) {
9541 clause = &header->clauses [cc];
9542 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9543 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9544 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9550 if ((ip - header->code) != nearest->handler_offset)
9555 case CEE_UNALIGNED_:
9556 ins_flag |= MONO_INST_UNALIGNED;
9557 /* FIXME: record alignment? we can assume 1 for now */
9562 ins_flag |= MONO_INST_VOLATILE;
9566 ins_flag |= MONO_INST_TAILCALL;
9567 cfg->flags |= MONO_CFG_HAS_TAIL;
9568 /* Can't inline tail calls at this time */
9569 inline_costs += 100000;
9576 token = read32 (ip + 2);
9577 klass = mini_get_class (method, token, generic_context);
9578 CHECK_TYPELOAD (klass);
9579 if (generic_class_is_reference_type (cfg, klass))
9580 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9582 mini_emit_initobj (cfg, *sp, NULL, klass);
9586 case CEE_CONSTRAINED_:
9588 token = read32 (ip + 2);
9589 if (method->wrapper_type != MONO_WRAPPER_NONE)
9590 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9592 constrained_call = mono_class_get_full (image, token, generic_context);
9593 CHECK_TYPELOAD (constrained_call);
9598 MonoInst *iargs [3];
9602 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9603 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9604 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9605 /* emit_memset only works when val == 0 */
9606 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9611 if (ip [1] == CEE_CPBLK) {
9612 MonoMethod *memcpy_method = get_memcpy_method ();
9613 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9615 MonoMethod *memset_method = get_memset_method ();
9616 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9626 ins_flag |= MONO_INST_NOTYPECHECK;
9628 ins_flag |= MONO_INST_NORANGECHECK;
9629 /* we ignore the no-nullcheck for now since we
9630 * really do it explicitly only when doing callvirt->call
9636 int handler_offset = -1;
9638 for (i = 0; i < header->num_clauses; ++i) {
9639 MonoExceptionClause *clause = &header->clauses [i];
9640 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9641 handler_offset = clause->handler_offset;
9646 bblock->flags |= BB_EXCEPTION_UNSAFE;
9648 g_assert (handler_offset != -1);
9650 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9651 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9652 ins->sreg1 = load->dreg;
9653 MONO_ADD_INS (bblock, ins);
9655 link_bblock (cfg, bblock, end_bblock);
9656 start_new_bblock = 1;
9664 CHECK_STACK_OVF (1);
9666 token = read32 (ip + 2);
9667 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9668 MonoType *type = mono_type_create_from_typespec (image, token);
9669 token = mono_type_size (type, &ialign);
9671 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9672 CHECK_TYPELOAD (klass);
9673 mono_class_init (klass);
9674 token = mono_class_value_size (klass, &align);
9676 EMIT_NEW_ICONST (cfg, ins, token);
9681 case CEE_REFANYTYPE: {
9682 MonoInst *src_var, *src;
9688 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9690 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9691 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9692 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9710 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9720 g_warning ("opcode 0x%02x not handled", *ip);
9724 if (start_new_bblock != 1)
9727 bblock->cil_length = ip - bblock->cil_code;
9728 bblock->next_bb = end_bblock;
9730 if (cfg->method == method && cfg->domainvar) {
9732 MonoInst *get_domain;
9734 cfg->cbb = init_localsbb;
9736 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9737 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9740 get_domain->dreg = alloc_preg (cfg);
9741 MONO_ADD_INS (cfg->cbb, get_domain);
9743 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9744 MONO_ADD_INS (cfg->cbb, store);
9747 #ifdef TARGET_POWERPC
9748 if (cfg->compile_aot)
9749 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9750 mono_get_got_var (cfg);
9753 if (cfg->method == method && cfg->got_var)
9754 mono_emit_load_got_addr (cfg);
9759 cfg->cbb = init_localsbb;
9761 for (i = 0; i < header->num_locals; ++i) {
9762 MonoType *ptype = header->locals [i];
9763 int t = ptype->type;
9764 dreg = cfg->locals [i]->dreg;
9766 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9767 t = mono_class_enum_basetype (ptype->data.klass)->type;
9769 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9770 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9771 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9772 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9773 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9774 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9775 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9776 ins->type = STACK_R8;
9777 ins->inst_p0 = (void*)&r8_0;
9778 ins->dreg = alloc_dreg (cfg, STACK_R8);
9779 MONO_ADD_INS (init_localsbb, ins);
9780 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9781 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9782 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9783 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9785 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9790 if (cfg->init_ref_vars && cfg->method == method) {
9791 /* Emit initialization for ref vars */
9792 // FIXME: Avoid duplication initialization for IL locals.
9793 for (i = 0; i < cfg->num_varinfo; ++i) {
9794 MonoInst *ins = cfg->varinfo [i];
9796 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9797 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9801 /* Add a sequence point for method entry/exit events */
9803 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9804 MONO_ADD_INS (init_localsbb, ins);
9805 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9806 MONO_ADD_INS (cfg->bb_exit, ins);
9811 if (cfg->method == method) {
9813 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9814 bb->region = mono_find_block_region (cfg, bb->real_offset);
9816 mono_create_spvar_for_region (cfg, bb->region);
9817 if (cfg->verbose_level > 2)
9818 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9822 g_slist_free (class_inits);
9823 dont_inline = g_list_remove (dont_inline, method);
9825 if (inline_costs < 0) {
9828 /* Method is too large */
9829 mname = mono_method_full_name (method, TRUE);
9830 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9831 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9836 if ((cfg->verbose_level > 2) && (cfg->method == method))
9837 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9839 return inline_costs;
9842 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9843 g_slist_free (class_inits);
9844 mono_basic_block_free (bb);
9845 dont_inline = g_list_remove (dont_inline, method);
9849 g_slist_free (class_inits);
9850 mono_basic_block_free (bb);
9851 dont_inline = g_list_remove (dont_inline, method);
9855 g_slist_free (class_inits);
9856 mono_basic_block_free (bb);
9857 dont_inline = g_list_remove (dont_inline, method);
9858 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9862 g_slist_free (class_inits);
9863 mono_basic_block_free (bb);
9864 dont_inline = g_list_remove (dont_inline, method);
9865 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-register-to-membase opcode to the corresponding
 * store-immediate-to-membase opcode, so a constant value can be folded
 * directly into the store instruction.
 */
9870 store_membase_reg_to_store_membase_imm (int opcode)
9873 case OP_STORE_MEMBASE_REG:
9874 return OP_STORE_MEMBASE_IMM;
9875 case OP_STOREI1_MEMBASE_REG:
9876 return OP_STOREI1_MEMBASE_IMM;
9877 case OP_STOREI2_MEMBASE_REG:
9878 return OP_STOREI2_MEMBASE_IMM;
9879 case OP_STOREI4_MEMBASE_REG:
9880 return OP_STOREI4_MEMBASE_IMM;
9881 case OP_STOREI8_MEMBASE_REG:
9882 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for any other store opcode */
9884 g_assert_not_reached ();
9890 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to its register-immediate variant,
 * used by the constant-folding/peephole code when one operand is a
 * known constant. (Callers elsewhere in the JIT treat a -1 result as
 * "no immediate form exists" — TODO confirm, the default branch is not
 * visible in this extract.)
 */
9893 mono_op_to_op_imm (int opcode)
/* Integer division/remainder and shift immediates */
9903 return OP_IDIV_UN_IMM;
9907 return OP_IREM_UN_IMM;
9921 return OP_ISHR_UN_IMM;
9938 return OP_LSHR_UN_IMM;
/* Compare immediates (native, 32 bit, 64 bit) */
9941 return OP_COMPARE_IMM;
9943 return OP_ICOMPARE_IMM;
9945 return OP_LCOMPARE_IMM;
/* Store immediates, same mapping as store_membase_reg_to_store_membase_imm () */
9947 case OP_STORE_MEMBASE_REG:
9948 return OP_STORE_MEMBASE_IMM;
9949 case OP_STOREI1_MEMBASE_REG:
9950 return OP_STOREI1_MEMBASE_IMM;
9951 case OP_STOREI2_MEMBASE_REG:
9952 return OP_STOREI2_MEMBASE_IMM;
9953 case OP_STOREI4_MEMBASE_REG:
9954 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific instruction forms */
9956 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9958 return OP_X86_PUSH_IMM;
9959 case OP_X86_COMPARE_MEMBASE_REG:
9960 return OP_X86_COMPARE_MEMBASE_IMM;
9962 #if defined(TARGET_AMD64)
9963 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9964 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9966 case OP_VOIDCALL_REG:
9975 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the JIT's typed
 * OP_LOAD*_MEMBASE opcode. (The case labels are not visible in this
 * extract; the return values below follow the CIL ldind type order:
 * i1/u1/i2/u2/i4/u4/i/ref/i8/r4/r8.)
 */
9982 ldind_to_load_membase (int opcode)
9986 return OP_LOADI1_MEMBASE;
9988 return OP_LOADU1_MEMBASE;
9990 return OP_LOADI2_MEMBASE;
9992 return OP_LOADU2_MEMBASE;
9994 return OP_LOADI4_MEMBASE;
9996 return OP_LOADU4_MEMBASE;
/* Native int and object references both use the pointer-sized load */
9998 return OP_LOAD_MEMBASE;
10000 return OP_LOAD_MEMBASE;
10002 return OP_LOADI8_MEMBASE;
10004 return OP_LOADR4_MEMBASE;
10006 return OP_LOADR8_MEMBASE;
/* Any other opcode is not an ldind */
10008 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the JIT's typed
 * OP_STORE*_MEMBASE_REG opcode; object references use the
 * pointer-sized store.
 */
10015 stind_to_store_membase (int opcode)
10019 return OP_STOREI1_MEMBASE_REG;
10021 return OP_STOREI2_MEMBASE_REG;
10023 return OP_STOREI4_MEMBASE_REG;
10025 case CEE_STIND_REF:
10026 return OP_STORE_MEMBASE_REG;
10028 return OP_STOREI8_MEMBASE_REG;
10030 return OP_STORER4_MEMBASE_REG;
10032 return OP_STORER8_MEMBASE_REG;
/* Any other opcode is not an stind */
10034 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the corresponding absolute-address
 * OP_LOAD*_MEM opcode, for targets (x86/amd64) which can load from an
 * immediate address. (Presumably returns -1 on other targets / opcodes —
 * the fall-through is not visible in this extract; confirm upstream.)
 */
10041 mono_load_membase_to_load_mem (int opcode)
10043 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10044 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10046 case OP_LOAD_MEMBASE:
10047 return OP_LOAD_MEM;
10048 case OP_LOADU1_MEMBASE:
10049 return OP_LOADU1_MEM;
10050 case OP_LOADU2_MEMBASE:
10051 return OP_LOADU2_MEM;
10052 case OP_LOADI4_MEMBASE:
10053 return OP_LOADI4_MEM;
10054 case OP_LOADU4_MEMBASE:
10055 return OP_LOADU4_MEM;
/* 64 bit loads only have an absolute form on 64 bit hosts */
10056 #if SIZEOF_REGISTER == 8
10057 case OP_LOADI8_MEMBASE:
10058 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   If an ALU opcode whose result is stored back to memory with
 * STORE_OPCODE can be fused into a single read-modify-write
 * <op>_MEMBASE instruction on this target, return that opcode.
 * Callers test the result against -1 to mean "no fused form"
 * (see the use in mono_spill_global_vars ()).
 */
10067 op_to_op_dest_membase (int store_opcode, int opcode)
10069 #if defined(TARGET_X86)
/* Only pointer-sized/32 bit stores can be fused on x86 */
10070 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10075 return OP_X86_ADD_MEMBASE_REG;
10077 return OP_X86_SUB_MEMBASE_REG;
10079 return OP_X86_AND_MEMBASE_REG;
10081 return OP_X86_OR_MEMBASE_REG;
10083 return OP_X86_XOR_MEMBASE_REG;
10086 return OP_X86_ADD_MEMBASE_IMM;
10089 return OP_X86_SUB_MEMBASE_IMM;
10092 return OP_X86_AND_MEMBASE_IMM;
10095 return OP_X86_OR_MEMBASE_IMM;
10098 return OP_X86_XOR_MEMBASE_IMM;
10104 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64 bit stores */
10105 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit operations reuse the x86 opcodes */
10110 return OP_X86_ADD_MEMBASE_REG;
10112 return OP_X86_SUB_MEMBASE_REG;
10114 return OP_X86_AND_MEMBASE_REG;
10116 return OP_X86_OR_MEMBASE_REG;
10118 return OP_X86_XOR_MEMBASE_REG;
10120 return OP_X86_ADD_MEMBASE_IMM;
10122 return OP_X86_SUB_MEMBASE_IMM;
10124 return OP_X86_AND_MEMBASE_IMM;
10126 return OP_X86_OR_MEMBASE_IMM;
10128 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit operations use the AMD64 specific opcodes */
10130 return OP_AMD64_ADD_MEMBASE_REG;
10132 return OP_AMD64_SUB_MEMBASE_REG;
10134 return OP_AMD64_AND_MEMBASE_REG;
10136 return OP_AMD64_OR_MEMBASE_REG;
10138 return OP_AMD64_XOR_MEMBASE_REG;
10141 return OP_AMD64_ADD_MEMBASE_IMM;
10144 return OP_AMD64_SUB_MEMBASE_IMM;
10147 return OP_AMD64_AND_MEMBASE_IMM;
10150 return OP_AMD64_OR_MEMBASE_IMM;
10153 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   If OPCODE produces a condition-flag result that is then stored as a
 * byte (STOREI1_MEMBASE_REG), fuse the two into an x86 SETcc-to-memory
 * instruction. Callers test the result against -1 to mean "no fused
 * form" (see the use in mono_spill_global_vars ()). The case labels for
 * the eq/ne opcodes are not visible in this extract.
 */
10163 op_to_op_store_membase (int store_opcode, int opcode)
10165 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10168 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10169 return OP_X86_SETEQ_MEMBASE;
10171 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10172 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   If the first source operand of OPCODE is produced by a memory load
 * of kind LOAD_OPCODE, return an opcode which reads that operand
 * directly from memory, so the separate load can be eliminated.
 * Callers test the result against -1 to mean "no fused form"
 * (see the use in mono_spill_global_vars ()).
 * NOTE(review): a commented-out region opens at the "FIXME: This only
 * works for 32 bit immediates" line below; its closing marker falls in
 * lines missing from this extract.
 */
10180 op_to_op_src1_membase (int load_opcode, int opcode)
10183 /* FIXME: This has sign extension issues */
10185 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10186 return OP_X86_COMPARE_MEMBASE8_IMM;
10189 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10194 return OP_X86_PUSH_MEMBASE;
10195 case OP_COMPARE_IMM:
10196 case OP_ICOMPARE_IMM:
10197 return OP_X86_COMPARE_MEMBASE_IMM;
10200 return OP_X86_COMPARE_MEMBASE_REG;
10204 #ifdef TARGET_AMD64
10205 /* FIXME: This has sign extension issues */
10207 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10208 return OP_X86_COMPARE_MEMBASE8_IMM;
10213 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10214 return OP_X86_PUSH_MEMBASE;
10216 /* FIXME: This only works for 32 bit immediates
10217 case OP_COMPARE_IMM:
10218 case OP_LCOMPARE_IMM:
10219 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10220 return OP_AMD64_COMPARE_MEMBASE_IMM;
10222 case OP_ICOMPARE_IMM:
10223 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10224 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10228 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10229 return OP_AMD64_COMPARE_MEMBASE_REG;
10232 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10233 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   If the second source operand of OPCODE is produced by a memory load
 * of kind LOAD_OPCODE, return a reg-membase opcode which reads that
 * operand directly from memory, so the separate load can be eliminated.
 * Callers test the result against -1 to mean "no fused form"
 * (see the use in mono_spill_global_vars ()).
 */
10242 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized/32 bit loads can be fused */
10245 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10251 return OP_X86_COMPARE_REG_MEMBASE;
10253 return OP_X86_ADD_REG_MEMBASE;
10255 return OP_X86_SUB_REG_MEMBASE;
10257 return OP_X86_AND_REG_MEMBASE;
10259 return OP_X86_OR_REG_MEMBASE;
10261 return OP_X86_XOR_REG_MEMBASE;
10265 #ifdef TARGET_AMD64
/* amd64: pick the 32 bit or 64 bit form depending on the load width */
10268 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10269 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10273 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10274 return OP_AMD64_COMPARE_REG_MEMBASE;
10277 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10278 return OP_X86_ADD_REG_MEMBASE;
10280 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10281 return OP_X86_SUB_REG_MEMBASE;
10283 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10284 return OP_X86_AND_REG_MEMBASE;
10286 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10287 return OP_X86_OR_REG_MEMBASE;
10289 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10290 return OP_X86_XOR_REG_MEMBASE;
10292 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10293 return OP_AMD64_ADD_REG_MEMBASE;
10295 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10296 return OP_AMD64_SUB_REG_MEMBASE;
10298 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10299 return OP_AMD64_AND_REG_MEMBASE;
10301 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10302 return OP_AMD64_OR_REG_MEMBASE;
10304 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10305 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filters out opcodes which are
 * emulated with runtime helper calls on this target (long shifts on
 * 32 bit hosts, mul/div where the arch emulates them) — those cannot
 * take an immediate form. The filtered case labels fall in lines
 * missing from this extract.
 */
10313 mono_op_to_op_imm_noemul (int opcode)
10316 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10321 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Everything else can use the normal immediate mapping */
10329 return mono_op_to_op_imm (opcode);
10333 #ifndef DISABLE_JIT
10336 * mono_handle_global_vregs:
10338 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10342 mono_handle_global_vregs (MonoCompile *cfg)
10344 gint32 *vreg_to_bb;
10345 MonoBasicBlock *bb;
/*
 * NOTE(review): this allocation looks wrong on two counts — the element
 * size should presumably be sizeof (gint32), not sizeof (gint32*) (the
 * array holds gint32 block numbers), and the "+ 1" is outside the
 * multiplication, so only one extra *byte* (not one extra element) is
 * reserved. Harmless over-allocation on 64 bit, but confirm against
 * upstream.
 */
10348 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10350 #ifdef MONO_ARCH_SIMD_INTRINSICS
10351 if (cfg->uses_simd_intrinsics)
10352 mono_simd_simplify_indirection (cfg);
10355 /* Find local vregs used in more than one bb */
10356 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10357 MonoInst *ins = bb->code;
10358 int block_num = bb->block_num;
10360 if (cfg->verbose_level > 2)
10361 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10364 for (; ins; ins = ins->next) {
10365 const char *spec = INS_INFO (ins->opcode);
10366 int regtype = 0, regindex;
10369 if (G_UNLIKELY (cfg->verbose_level > 2))
10370 mono_print_ins (ins);
/* CIL opcodes must already have been lowered to machine IR by now */
10372 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine the dest, src1, src2 and src3 registers of each instruction in turn */
10374 for (regindex = 0; regindex < 4; regindex ++) {
10377 if (regindex == 0) {
10378 regtype = spec [MONO_INST_DEST];
10379 if (regtype == ' ')
10382 } else if (regindex == 1) {
10383 regtype = spec [MONO_INST_SRC1];
10384 if (regtype == ' ')
10387 } else if (regindex == 2) {
10388 regtype = spec [MONO_INST_SRC2];
10389 if (regtype == ' ')
10392 } else if (regindex == 3) {
10393 regtype = spec [MONO_INST_SRC3];
10394 if (regtype == ' ')
10399 #if SIZEOF_REGISTER == 4
10400 /* In the LLVM case, the long opcodes are not decomposed */
10401 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10403 * Since some instructions reference the original long vreg,
10404 * and some reference the two component vregs, it is quite hard
10405 * to determine when it needs to be global. So be conservative.
10407 if (!get_vreg_to_inst (cfg, vreg)) {
10408 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10410 if (cfg->verbose_level > 2)
10411 printf ("LONG VREG R%d made global.\n", vreg);
10415 * Make the component vregs volatile since the optimizations can
10416 * get confused otherwise.
10418 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10419 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10423 g_assert (vreg != -1);
/* vreg_to_bb: 0 = unseen, block_num + 1 = seen in one bb, -1 = seen in several bbs */
10425 prev_bb = vreg_to_bb [vreg];
10426 if (prev_bb == 0) {
10427 /* 0 is a valid block num */
10428 vreg_to_bb [vreg] = block_num + 1;
10429 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by definition, no variable needed */
10430 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10433 if (!get_vreg_to_inst (cfg, vreg)) {
10434 if (G_UNLIKELY (cfg->verbose_level > 2))
10435 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create an OP_LOCAL of the matching type: int, long, double or vtype */
10439 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10442 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10445 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10448 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10451 g_assert_not_reached ();
10455 /* Flag as having been used in more than one bb */
10456 vreg_to_bb [vreg] = -1;
10462 /* If a variable is used in only one bblock, convert it into a local vreg */
10463 for (i = 0; i < cfg->num_varinfo; i++) {
10464 MonoInst *var = cfg->varinfo [i];
10465 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10467 switch (var->type) {
10473 #if SIZEOF_REGISTER == 8
10476 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10477 /* Enabling this screws up the fp stack on x86 */
10480 /* Arguments are implicitly global */
10481 /* Putting R4 vars into registers doesn't work currently */
10482 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10484 * Make that the variable's liveness interval doesn't contain a call, since
10485 * that would cause the lvreg to be spilled, making the whole optimization
10488 /* This is too slow for JIT compilation */
10490 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10492 int def_index, call_index, ins_index;
10493 gboolean spilled = FALSE;
10498 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10499 const char *spec = INS_INFO (ins->opcode);
10501 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10502 def_index = ins_index;
/*
 * NOTE(review): both halves of this || test SRC1/sreg1 — the second
 * disjunct looks like a copy/paste slip for
 * (spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg),
 * so a use through sreg2 after a call would not be detected here.
 * Confirm against upstream before changing.
 */
10504 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10505 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10506 if (call_index > def_index) {
10512 if (MONO_IS_CALL (ins))
10513 call_index = ins_index;
10523 if (G_UNLIKELY (cfg->verbose_level > 2))
10524 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; the vreg itself stays usable as a local vreg */
10525 var->flags |= MONO_INST_IS_DEAD;
10526 cfg->vreg_to_inst [var->dreg] = NULL;
10533 * Compress the varinfo and vars tables so the liveness computation is faster and
10534 * takes up less space.
10537 for (i = 0; i < cfg->num_varinfo; ++i) {
10538 MonoInst *var = cfg->varinfo [i];
10539 if (pos < i && cfg->locals_start == i)
10540 cfg->locals_start = pos;
10541 if (!(var->flags & MONO_INST_IS_DEAD)) {
10543 cfg->varinfo [pos] = cfg->varinfo [i];
10544 cfg->varinfo [pos]->inst_c0 = pos;
10545 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10546 cfg->vars [pos].idx = pos;
10547 #if SIZEOF_REGISTER == 4
10548 if (cfg->varinfo [pos]->type == STACK_I8) {
10549 /* Modify the two component vars too */
10552 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10553 var1->inst_c0 = pos;
10554 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10555 var1->inst_c0 = pos;
10562 cfg->num_varinfo = pos;
10563 if (cfg->locals_start > cfg->num_varinfo)
10564 cfg->locals_start = cfg->num_varinfo;
10568 * mono_spill_global_vars:
10570 * Generate spill code for variables which are not allocated to registers,
10571 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10572 * code is generated which could be optimized by the local optimization passes.
10575 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10577 MonoBasicBlock *bb;
10579 int orig_next_vreg;
10580 guint32 *vreg_to_lvreg;
10582 guint32 i, lvregs_len;
10583 gboolean dest_has_lvreg = FALSE;
10584 guint32 stacktypes [128];
10585 MonoInst **live_range_start, **live_range_end;
10586 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10588 *need_local_opts = FALSE;
10590 memset (spec2, 0, sizeof (spec2));
10592 /* FIXME: Move this function to mini.c */
10593 stacktypes ['i'] = STACK_PTR;
10594 stacktypes ['l'] = STACK_I8;
10595 stacktypes ['f'] = STACK_R8;
10596 #ifdef MONO_ARCH_SIMD_INTRINSICS
10597 stacktypes ['x'] = STACK_VTYPE;
10600 #if SIZEOF_REGISTER == 4
10601 /* Create MonoInsts for longs */
10602 for (i = 0; i < cfg->num_varinfo; i++) {
10603 MonoInst *ins = cfg->varinfo [i];
10605 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10606 switch (ins->type) {
10611 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10614 g_assert (ins->opcode == OP_REGOFFSET);
10616 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10618 tree->opcode = OP_REGOFFSET;
10619 tree->inst_basereg = ins->inst_basereg;
10620 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10622 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10624 tree->opcode = OP_REGOFFSET;
10625 tree->inst_basereg = ins->inst_basereg;
10626 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10636 /* FIXME: widening and truncation */
10639 * As an optimization, when a variable allocated to the stack is first loaded into
10640 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10641 * the variable again.
10643 orig_next_vreg = cfg->next_vreg;
10644 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10645 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10649 * These arrays contain the first and last instructions accessing a given
10651 * Since we emit bblocks in the same order we process them here, and we
10652 * don't split live ranges, these will precisely describe the live range of
10653 * the variable, i.e. the instruction range where a valid value can be found
10654 * in the variables location.
10655 * The live range is computed using the liveness info computed by the liveness pass.
10656 * We can't use vmv->range, since that is an abstract live range, and we need
10657 * one which is instruction precise.
10658 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10660 /* FIXME: Only do this if debugging info is requested */
10661 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10662 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10663 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10664 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10666 /* Add spill loads/stores */
10667 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10670 if (cfg->verbose_level > 2)
10671 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10673 /* Clear vreg_to_lvreg array */
10674 for (i = 0; i < lvregs_len; i++)
10675 vreg_to_lvreg [lvregs [i]] = 0;
10679 MONO_BB_FOR_EACH_INS (bb, ins) {
10680 const char *spec = INS_INFO (ins->opcode);
10681 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10682 gboolean store, no_lvreg;
10683 int sregs [MONO_MAX_SRC_REGS];
10685 if (G_UNLIKELY (cfg->verbose_level > 2))
10686 mono_print_ins (ins);
10688 if (ins->opcode == OP_NOP)
10692 * We handle LDADDR here as well, since it can only be decomposed
10693 * when variable addresses are known.
10695 if (ins->opcode == OP_LDADDR) {
10696 MonoInst *var = ins->inst_p0;
10698 if (var->opcode == OP_VTARG_ADDR) {
10699 /* Happens on SPARC/S390 where vtypes are passed by reference */
10700 MonoInst *vtaddr = var->inst_left;
10701 if (vtaddr->opcode == OP_REGVAR) {
10702 ins->opcode = OP_MOVE;
10703 ins->sreg1 = vtaddr->dreg;
10705 else if (var->inst_left->opcode == OP_REGOFFSET) {
10706 ins->opcode = OP_LOAD_MEMBASE;
10707 ins->inst_basereg = vtaddr->inst_basereg;
10708 ins->inst_offset = vtaddr->inst_offset;
10712 g_assert (var->opcode == OP_REGOFFSET);
10714 ins->opcode = OP_ADD_IMM;
10715 ins->sreg1 = var->inst_basereg;
10716 ins->inst_imm = var->inst_offset;
10719 *need_local_opts = TRUE;
10720 spec = INS_INFO (ins->opcode);
10723 if (ins->opcode < MONO_CEE_LAST) {
10724 mono_print_ins (ins);
10725 g_assert_not_reached ();
10729 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10733 if (MONO_IS_STORE_MEMBASE (ins)) {
10734 tmp_reg = ins->dreg;
10735 ins->dreg = ins->sreg2;
10736 ins->sreg2 = tmp_reg;
10739 spec2 [MONO_INST_DEST] = ' ';
10740 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10741 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10742 spec2 [MONO_INST_SRC3] = ' ';
10744 } else if (MONO_IS_STORE_MEMINDEX (ins))
10745 g_assert_not_reached ();
10750 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10751 printf ("\t %.3s %d", spec, ins->dreg);
10752 num_sregs = mono_inst_get_src_registers (ins, sregs);
10753 for (srcindex = 0; srcindex < 3; ++srcindex)
10754 printf (" %d", sregs [srcindex]);
10761 regtype = spec [MONO_INST_DEST];
10762 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10765 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10766 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10767 MonoInst *store_ins;
10769 MonoInst *def_ins = ins;
10770 int dreg = ins->dreg; /* The original vreg */
10772 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10774 if (var->opcode == OP_REGVAR) {
10775 ins->dreg = var->dreg;
10776 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10778 * Instead of emitting a load+store, use a _membase opcode.
10780 g_assert (var->opcode == OP_REGOFFSET);
10781 if (ins->opcode == OP_MOVE) {
10785 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10786 ins->inst_basereg = var->inst_basereg;
10787 ins->inst_offset = var->inst_offset;
10790 spec = INS_INFO (ins->opcode);
10794 g_assert (var->opcode == OP_REGOFFSET);
10796 prev_dreg = ins->dreg;
10798 /* Invalidate any previous lvreg for this vreg */
10799 vreg_to_lvreg [ins->dreg] = 0;
10803 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10805 store_opcode = OP_STOREI8_MEMBASE_REG;
10808 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10810 if (regtype == 'l') {
10811 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10812 mono_bblock_insert_after_ins (bb, ins, store_ins);
10813 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10814 mono_bblock_insert_after_ins (bb, ins, store_ins);
10815 def_ins = store_ins;
10818 g_assert (store_opcode != OP_STOREV_MEMBASE);
10820 /* Try to fuse the store into the instruction itself */
10821 /* FIXME: Add more instructions */
10822 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10823 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10824 ins->inst_imm = ins->inst_c0;
10825 ins->inst_destbasereg = var->inst_basereg;
10826 ins->inst_offset = var->inst_offset;
10827 spec = INS_INFO (ins->opcode);
10828 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10829 ins->opcode = store_opcode;
10830 ins->inst_destbasereg = var->inst_basereg;
10831 ins->inst_offset = var->inst_offset;
10835 tmp_reg = ins->dreg;
10836 ins->dreg = ins->sreg2;
10837 ins->sreg2 = tmp_reg;
10840 spec2 [MONO_INST_DEST] = ' ';
10841 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10842 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10843 spec2 [MONO_INST_SRC3] = ' ';
10845 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10846 // FIXME: The backends expect the base reg to be in inst_basereg
10847 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10849 ins->inst_basereg = var->inst_basereg;
10850 ins->inst_offset = var->inst_offset;
10851 spec = INS_INFO (ins->opcode);
10853 /* printf ("INS: "); mono_print_ins (ins); */
10854 /* Create a store instruction */
10855 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10857 /* Insert it after the instruction */
10858 mono_bblock_insert_after_ins (bb, ins, store_ins);
10860 def_ins = store_ins;
10863 * We can't assign ins->dreg to var->dreg here, since the
10864 * sregs could use it. So set a flag, and do it after
10867 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10868 dest_has_lvreg = TRUE;
10873 if (def_ins && !live_range_start [dreg]) {
10874 live_range_start [dreg] = def_ins;
10875 live_range_start_bb [dreg] = bb;
10882 num_sregs = mono_inst_get_src_registers (ins, sregs);
10883 for (srcindex = 0; srcindex < 3; ++srcindex) {
10884 regtype = spec [MONO_INST_SRC1 + srcindex];
10885 sreg = sregs [srcindex];
10887 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10888 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10889 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10890 MonoInst *use_ins = ins;
10891 MonoInst *load_ins;
10892 guint32 load_opcode;
10894 if (var->opcode == OP_REGVAR) {
10895 sregs [srcindex] = var->dreg;
10896 //mono_inst_set_src_registers (ins, sregs);
10897 live_range_end [sreg] = use_ins;
10898 live_range_end_bb [sreg] = bb;
10902 g_assert (var->opcode == OP_REGOFFSET);
10904 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10906 g_assert (load_opcode != OP_LOADV_MEMBASE);
10908 if (vreg_to_lvreg [sreg]) {
10909 g_assert (vreg_to_lvreg [sreg] != -1);
10911 /* The variable is already loaded to an lvreg */
10912 if (G_UNLIKELY (cfg->verbose_level > 2))
10913 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10914 sregs [srcindex] = vreg_to_lvreg [sreg];
10915 //mono_inst_set_src_registers (ins, sregs);
10919 /* Try to fuse the load into the instruction */
10920 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10921 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10922 sregs [0] = var->inst_basereg;
10923 //mono_inst_set_src_registers (ins, sregs);
10924 ins->inst_offset = var->inst_offset;
10925 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10926 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10927 sregs [1] = var->inst_basereg;
10928 //mono_inst_set_src_registers (ins, sregs);
10929 ins->inst_offset = var->inst_offset;
10931 if (MONO_IS_REAL_MOVE (ins)) {
10932 ins->opcode = OP_NOP;
10935 //printf ("%d ", srcindex); mono_print_ins (ins);
10937 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10939 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10940 if (var->dreg == prev_dreg) {
10942 * sreg refers to the value loaded by the load
10943 * emitted below, but we need to use ins->dreg
10944 * since it refers to the store emitted earlier.
10948 g_assert (sreg != -1);
10949 vreg_to_lvreg [var->dreg] = sreg;
10950 g_assert (lvregs_len < 1024);
10951 lvregs [lvregs_len ++] = var->dreg;
10955 sregs [srcindex] = sreg;
10956 //mono_inst_set_src_registers (ins, sregs);
10958 if (regtype == 'l') {
10959 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10960 mono_bblock_insert_before_ins (bb, ins, load_ins);
10961 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10962 mono_bblock_insert_before_ins (bb, ins, load_ins);
10963 use_ins = load_ins;
10966 #if SIZEOF_REGISTER == 4
10967 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10969 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10970 mono_bblock_insert_before_ins (bb, ins, load_ins);
10971 use_ins = load_ins;
10975 if (var->dreg < orig_next_vreg) {
10976 live_range_end [var->dreg] = use_ins;
10977 live_range_end_bb [var->dreg] = bb;
10981 mono_inst_set_src_registers (ins, sregs);
10983 if (dest_has_lvreg) {
10984 g_assert (ins->dreg != -1);
10985 vreg_to_lvreg [prev_dreg] = ins->dreg;
10986 g_assert (lvregs_len < 1024);
10987 lvregs [lvregs_len ++] = prev_dreg;
10988 dest_has_lvreg = FALSE;
10992 tmp_reg = ins->dreg;
10993 ins->dreg = ins->sreg2;
10994 ins->sreg2 = tmp_reg;
10997 if (MONO_IS_CALL (ins)) {
10998 /* Clear vreg_to_lvreg array */
10999 for (i = 0; i < lvregs_len; i++)
11000 vreg_to_lvreg [lvregs [i]] = 0;
11002 } else if (ins->opcode == OP_NOP) {
11004 MONO_INST_NULLIFY_SREGS (ins);
11007 if (cfg->verbose_level > 2)
11008 mono_print_ins_index (1, ins);
11011 /* Extend the live range based on the liveness info */
11012 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11013 for (i = 0; i < cfg->num_varinfo; i ++) {
11014 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11016 if (vreg_is_volatile (cfg, vi->vreg))
11017 /* The liveness info is incomplete */
11020 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11021 /* Live from at least the first ins of this bb */
11022 live_range_start [vi->vreg] = bb->code;
11023 live_range_start_bb [vi->vreg] = bb;
11026 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11027 /* Live at least until the last ins of this bb */
11028 live_range_end [vi->vreg] = bb->last_ins;
11029 live_range_end_bb [vi->vreg] = bb;
11035 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11037 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11038 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11040 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11041 for (i = 0; i < cfg->num_varinfo; ++i) {
11042 int vreg = MONO_VARINFO (cfg, i)->vreg;
11045 if (live_range_start [vreg]) {
11046 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11048 ins->inst_c1 = vreg;
11049 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11051 if (live_range_end [vreg]) {
11052 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11054 ins->inst_c1 = vreg;
11055 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11056 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11058 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11064 g_free (live_range_start);
11065 g_free (live_range_end);
11066 g_free (live_range_start_bb);
11067 g_free (live_range_end_bb);
11072 * - use 'iadd' instead of 'int_add'
11073 * - handling ovf opcodes: decompose in method_to_ir.
11074 * - unify iregs/fregs
11075 * -> partly done, the missing parts are:
11076 * - a more complete unification would involve unifying the hregs as well, so
11077 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11078 * would no longer map to the machine hregs, so the code generators would need to
11079 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11080 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11081 * fp/non-fp branches speeds it up by about 15%.
11082 * - use sext/zext opcodes instead of shifts
11084 * - get rid of TEMPLOADs if possible and use vregs instead
11085 * - clean up usage of OP_P/OP_ opcodes
11086 * - cleanup usage of DUMMY_USE
11087 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11089 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11090 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11091 * - make sure handle_stack_args () is called before the branch is emitted
11092 * - when the new IR is done, get rid of all unused stuff
11093 * - COMPARE/BEQ as separate instructions or unify them ?
11094 * - keeping them separate allows specialized compare instructions like
11095 * compare_imm, compare_membase
11096 * - most back ends unify fp compare+branch, fp compare+ceq
11097 * - integrate mono_save_args into inline_method
11098 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11099 * - handle long shift opts on 32 bit platforms somehow: they require
11100 * 3 sregs (2 for arg1 and 1 for arg2)
11101 * - make byref a 'normal' type.
11102 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11103 * variable if needed.
11104 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11105 * like inline_method.
11106 * - remove inlining restrictions
11107 * - fix LNEG and enable cfold of INEG
11108 * - generalize x86 optimizations like ldelema as a peephole optimization
11109 * - add store_mem_imm for amd64
11110 * - optimize the loading of the interruption flag in the managed->native wrappers
11111 * - avoid special handling of OP_NOP in passes
11112 * - move code inserting instructions into one function/macro.
11113 * - try a coalescing phase after liveness analysis
11114 * - add float -> vreg conversion + local optimizations on !x86
11115 * - figure out how to handle decomposed branches during optimizations, ie.
11116 * compare+branch, op_jump_table+op_br etc.
11117 * - promote RuntimeXHandles to vregs
11118 * - vtype cleanups:
11119 * - add a NEW_VARLOADA_VREG macro
11120 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11121 * accessing vtype fields.
11122 * - get rid of I8CONST on 64 bit platforms
11123 * - dealing with the increase in code size due to branches created during opcode
11125 * - use extended basic blocks
11126 * - all parts of the JIT
11127 * - handle_global_vregs () && local regalloc
11128 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11129 * - sources of increase in code size:
11132 * - isinst and castclass
11133 * - lvregs not allocated to global registers even if used multiple times
11134 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11136 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11137 * - add all micro optimizations from the old JIT
11138 * - put tree optimizations into the deadce pass
11139 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11140 * specific function.
11141 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11142 * fcompare + branchCC.
11143 * - create a helper function for allocating a stack slot, taking into account
11144 * MONO_CFG_HAS_SPILLUP.
11146 * - merge the ia64 switch changes.
11147 * - optimize mono_regstate2_alloc_int/float.
11148 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11149 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11150 * parts of the tree could be separated by other instructions, killing the tree
11151 * arguments, or stores killing loads etc. Also, should we fold loads into other
11152 * instructions if the result of the load is used multiple times ?
11153 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11154 * - LAST MERGE: 108395.
11155 * - when returning vtypes in registers, generate IR and append it to the end of the
11156 * last bb instead of doing it in the epilog.
11157 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11165 - When to decompose opcodes:
11166 - earlier: this makes some optimizations hard to implement, since the low level IR
11167 no longer contains the necessary information. But it is easier to do.
11168 - later: harder to implement, enables more optimizations.
11169 - Branches inside bblocks:
11170 - created when decomposing complex opcodes.
11171 - branches to another bblock: harmless, but not tracked by the branch
11172 optimizations, so need to branch to a label at the start of the bblock.
11173 - branches to inside the same bblock: very problematic, trips up the local
11174 reg allocator. Can be fixed by splitting the current bblock, but that is a
11175 complex operation, since some local vregs can become global vregs etc.
11176 - Local/global vregs:
11177 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11178 local register allocator.
11179 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11180 structure, created by mono_create_var (). Assigned to hregs or the stack by
11181 the global register allocator.
11182 - When to do optimizations like alu->alu_imm:
11183 - earlier -> saves work later on since the IR will be smaller/simpler
11184 - later -> can work on more instructions
11185 - Handling of valuetypes:
11186 - When a vtype is pushed on the stack, a new temporary is created, an
11187 instruction computing its address (LDADDR) is emitted and pushed on
11188 the stack. Need to optimize cases when the vtype is used immediately as in
11189 argument passing, stloc etc.
11190 - Instead of the to_end stuff in the old JIT, simply call the function handling
11191 the values on the stack before emitting the last instruction of the bb.
11194 #endif /* DISABLE_JIT */