2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
/*
 * NOTE(review): this chunk is a line-sampled fragment -- the leading
 * number on each line is the original file line number and many lines
 * (closing "} while (0)" of the macros, some continuation lines) fall
 * in the gaps.  Code is kept byte-identical; only comments are added.
 *
 * Bail-out macros used throughout the CIL->IR conversion loop: they
 * record an exception state on the MonoCompile and jump to the shared
 * exception_exit label (visible in the goto targets below).
 */
59 #define BRANCH_COST 100
/* Methods longer than this (in IL bytes, presumably) are not inlined --
 * TODO confirm unit against the full source. */
60 #define INLINE_LENGTH_LIMIT 20
/* Marks an inline attempt as failed when compiling an inlined body. */
61 #define INLINE_FAILURE do {\
62 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
65 #define CHECK_CFG_EXCEPTION do {\
66 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
69 #define METHOD_ACCESS_FAILURE do { \
70 char *method_fname = mono_method_full_name (method, TRUE); \
71 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
72 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
73 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
74 g_free (method_fname); \
75 g_free (cil_method_fname); \
76 goto exception_exit; \
78 #define FIELD_ACCESS_FAILURE do { \
79 char *method_fname = mono_method_full_name (method, TRUE); \
80 char *field_fname = mono_field_full_name (field); \
81 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
82 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
83 g_free (method_fname); \
84 g_free (field_fname); \
85 goto exception_exit; \
87 #define GENERIC_SHARING_FAILURE(opcode) do { \
88 if (cfg->generic_sharing_context) { \
89 if (cfg->verbose_level > 2) \
90 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
91 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
92 goto exception_exit; \
96 /* Determine whenever 'ins' represents a load of the 'this' argument */
97 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
99 static int ldind_to_load_membase (int opcode);
100 static int stind_to_store_membase (int opcode);
102 int mono_op_to_op_imm (int opcode);
103 int mono_op_to_op_imm_noemul (int opcode);
105 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
106 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
107 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* Entry point of the CIL->IR converter (body not in this chunk). */
109 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
110 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
111 guint inline_offset, gboolean is_virtual_call);
113 /* helper methods signature */
114 extern MonoMethodSignature *helper_sig_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_domain_get;
116 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
117 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
/*
 * Exported wrappers around the file-local alloc_* virtual-register
 * allocators: ireg = integer, freg = float, preg = pointer-sized,
 * dreg = destination register chosen from the eval-stack type.
 * NOTE(review): return types and braces fall in the sampling gaps;
 * code lines kept byte-identical.
 */
152 mono_alloc_ireg (MonoCompile *cfg)
154 return alloc_ireg (cfg);
158 mono_alloc_freg (MonoCompile *cfg)
160 return alloc_freg (cfg);
164 mono_alloc_preg (MonoCompile *cfg)
166 return alloc_preg (cfg);
170 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
172 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove (fragment): dispatches on a MonoType to pick
 * the move opcode for copying a value of that type between registers
 * (the returned opcodes fall in the sampling gaps).  Enums are
 * re-dispatched on their underlying type; generic instances on the
 * container class's byval type.
 * NOTE(review): many case bodies and all return statements are missing
 * from this sampled view; code kept byte-identical.
 */
176 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
182 switch (type->type) {
185 case MONO_TYPE_BOOLEAN:
197 case MONO_TYPE_FNPTR:
199 case MONO_TYPE_CLASS:
200 case MONO_TYPE_STRING:
201 case MONO_TYPE_OBJECT:
202 case MONO_TYPE_SZARRAY:
203 case MONO_TYPE_ARRAY:
/* 64-bit-only handling follows (lines elided by sampling). */
207 #if SIZEOF_VOID_P == 8
216 case MONO_TYPE_VALUETYPE:
/* Enums are moved as their underlying integral type. */
217 if (type->data.klass->enumtype) {
218 type = type->data.klass->enum_basetype;
/* SIMD value types get special treatment -- TODO confirm which opcode. */
221 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
224 case MONO_TYPE_TYPEDBYREF:
226 case MONO_TYPE_GENERICINST:
227 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal under generic sharing. */
231 g_assert (cfg->generic_sharing_context);
234 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb (fragment): debugging dump of one basic block -- its
 * in-edges, out-edges (block number plus depth-first number) and every
 * instruction in its code list.
 * NOTE(review): sampled fragment; declarations of 'i'/'tree', braces
 * and the OUT-edge header printf fall in the gaps.
 */
240 mono_print_bb (MonoBasicBlock *bb, const char *msg)
245 printf ("\n%s %d: [IN: ", msg, bb->block_num);
246 for (i = 0; i < bb->in_count; ++i)
247 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
249 for (i = 0; i < bb->out_count; ++i)
250 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
252 for (tree = bb->code; tree; tree = tree->next)
253 mono_print_ins_index (-1, tree);
/*
 * NOTE(review): sampled fragment of the IR-emission macro section.
 * Closing "} while (0)" lines and several continuation lines are in
 * the sampling gaps; code kept byte-identical.  No comments are
 * inserted between backslash-continued lines (a bare comment line
 * would end the macro).
 */
257 * Can't put this at the beginning, since other files reference stuff from this
/* Flags invalid IL: breaks into the debugger when requested, otherwise
 * jumps to the 'unverified' label in the converter loop. */
262 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Looks up (or lazily creates and registers) the basic block starting
 * at CIL offset 'ip'; validates the offset against the method body. */
264 #define GET_BBLOCK(cfg,tblock,ip) do { \
265 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
267 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
268 NEW_BBLOCK (cfg, (tblock)); \
269 (tblock)->cil_code = (ip); \
270 ADD_BBLOCK (cfg, (tblock)); \
274 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
275 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
276 int _length_reg = alloc_ireg (cfg); \
277 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
278 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
279 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
283 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
284 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
285 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
288 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
289 ins->sreg1 = array_reg; \
290 ins->sreg2 = index_reg; \
291 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
292 MONO_ADD_INS ((cfg)->cbb, ins); \
293 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
294 (cfg)->cbb->has_array_access = TRUE; \
298 #if defined(__i386__) || defined(__x86_64__)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_VOID_P == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pops two eval-stack entries, types the binary op via type_from_op,
 * widens the i4 operand against a pointer operand on 64-bit, and
 * appends the instruction to the current bblock. */
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 mono_decompose_opcode ((cfg), (ins)); \
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
/*
 * NOTE(review): sampled fragment; braces, 'int i', the early-return on
 * an existing edge, and the lines that install the grown arrays back
 * into from->out_bb / to->in_bb fall in the gaps.  Code kept verbatim.
 */
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* Verbose edge tracing: entry/exit blocks have no cil_code. */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* Skip the out-edge work if the edge already exists. */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot from the compile mempool (never freed
 * individually; lives as long as the compilation). */
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* Mirror the same duplicate-check + grow for the in-edge list of 'to'. */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above (return type and
 * braces are in the sampling gaps). */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
/*
 * NOTE(review): sampled fragment; braces, loop-variable declaration
 * and the final "return -1" (per the doc comment's -1 contract) are in
 * the gaps.  Region token layout visible here: (clause_index + 1) << 8
 * OR'd with a MONO_REGION_* kind and the clause flags.
 */
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
472 /* first search for handlers and filters */
473 for (i = 0; i < header->num_clauses; ++i) {
474 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
475 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
476 (offset < (clause->handler_offset)))
477 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
479 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
480 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
481 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
482 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
483 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Fallback: plain catch handler. */
485 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
489 /* search the try blocks */
490 for (i = 0; i < header->num_clauses; ++i) {
491 clause = &header->clauses [i];
492 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
493 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block (fragment): collects (into a GList, whose
 * declaration/return are in the sampling gaps) the handler bblocks of
 * clauses of kind 'type' whose protected region contains 'ip' but not
 * 'target' -- i.e. the handlers that a branch from ip to target exits.
 * NOTE(review): return type, 'res' declaration, braces and the final
 * return are not visible in this sampled chunk.
 */
500 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
502 MonoMethod *method = cfg->method;
503 MonoMethodHeader *header = mono_method_get_header (method);
504 MonoExceptionClause *clause;
505 MonoBasicBlock *handler;
509 for (i = 0; i < header->num_clauses; ++i) {
510 clause = &header->clauses [i];
/* Clause covers the branch source but not its destination. */
511 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
512 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
513 if (clause->flags == type) {
514 handler = cfg->cil_offset_to_bb [clause->handler_offset];
516 res = g_list_append (res, handler);
/*
 * Per-region stack-pointer variable and per-offset exception variable
 * helpers: each does a hash-table lookup keyed by region/offset and,
 * on a miss, creates an OP_LOCAL variable, marks it MONO_INST_INDIRECT
 * so the register allocator leaves it on the stack, and caches it.
 * NOTE(review): sampled fragment -- return types, 'var' declarations,
 * braces and the early-return on a cache hit are in the gaps.
 */
524 mono_create_spvar_for_region (MonoCompile *cfg, int region)
528 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
532 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
533 /* prevent it from being register allocated */
534 var->flags |= MONO_INST_INDIRECT;
536 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Read-only lookup variant: returns NULL when no exvar was created
 * for this IL offset (GHashTable lookup-miss semantics). */
540 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
542 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
546 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
550 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Exception variables are object-typed (they hold the thrown object). */
554 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
555 /* prevent it from being register allocated */
556 var->flags |= MONO_INST_INDIRECT;
558 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * NOTE(review): sampled fragment; the byref early-out around STACK_MP,
 * several case labels, 'break's and 'return's are in the gaps.  Sets
 * inst->type (STACK_*) and inst->klass from a MonoType.
 */
564 * Returns the type used in the eval stack when @type is loaded.
565 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
568 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
572 inst->klass = klass = mono_class_from_mono_type (type);
/* byref path, presumably -- the guarding condition is in a gap. */
574 inst->type = STACK_MP;
579 switch (type->type) {
581 inst->type = STACK_INV;
585 case MONO_TYPE_BOOLEAN:
591 inst->type = STACK_I4;
596 case MONO_TYPE_FNPTR:
597 inst->type = STACK_PTR;
599 case MONO_TYPE_CLASS:
600 case MONO_TYPE_STRING:
601 case MONO_TYPE_OBJECT:
602 case MONO_TYPE_SZARRAY:
603 case MONO_TYPE_ARRAY:
604 inst->type = STACK_OBJ;
608 inst->type = STACK_I8;
612 inst->type = STACK_R8;
614 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying type; re-enter the switch (the goto
 * is in a gap, presumably). */
615 if (type->data.klass->enumtype) {
616 type = type->data.klass->enum_basetype;
620 inst->type = STACK_VTYPE;
623 case MONO_TYPE_TYPEDBYREF:
624 inst->klass = mono_defaults.typed_reference_class;
625 inst->type = STACK_VTYPE;
627 case MONO_TYPE_GENERICINST:
628 type = &type->data.generic_class->container_class->byval_arg;
631 case MONO_TYPE_MVAR :
632 /* FIXME: all the arguments must be references for now,
633 * later look inside cfg and see if the arg num is
/* Type variables are only legal under generic sharing. */
636 g_assert (cfg->generic_sharing_context);
637 inst->type = STACK_OBJ;
640 g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * NOTE(review): sampled fragment of the eval-stack typing tables.
 * Storage-class/type lines (presumably "static const ... ") and the
 * closing "};" of each initializer are in the gaps; rows kept verbatim.
 * Row/column order is the STACK_* enum: Inv, I4, I8, PTR, R8, MP, OBJ,
 * VTYPE (per the bin_comp_table legend below).
 */
645 * The following tables are used to quickly validate the IL code in type_from_op ().
648 bin_num_table [STACK_MAX] [STACK_MAX] = {
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
652 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
653 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
654 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
655 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
661 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
664 /* reduce the size of this table */
666 bin_int_table [STACK_MAX] [STACK_MAX] = {
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
673 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
674 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
678 bin_comp_table [STACK_MAX] [STACK_MAX] = {
679 /* Inv i L p F & O vt */
681 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
682 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
683 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
684 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
685 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
686 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
687 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
690 /* reduce the size of this table */
692 shift_table [STACK_MAX] [STACK_MAX] = {
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* The *_op_map tables below are indexed by STACK_* type and hold the
 * offset to add to a generic CIL opcode to get the type-specific
 * machine-independent opcode (e.g. CEE_ADD + binops_op_map[STACK_I4]
 * == OP_IADD). */
704 * Tables to map from the non-specific opcode to the matching
705 * type-specific opcode.
707 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
709 binops_op_map [STACK_MAX] = {
710 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
713 /* handles from CEE_NEG to CEE_CONV_U8 */
715 unops_op_map [STACK_MAX] = {
716 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
719 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
721 ovfops_op_map [STACK_MAX] = {
722 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
725 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
727 ovf2ops_op_map [STACK_MAX] = {
728 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
731 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
733 ovf3ops_op_map [STACK_MAX] = {
734 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
737 /* handles from CEE_BEQ to CEE_BLT_UN */
739 beqops_op_map [STACK_MAX] = {
740 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
743 /* handles from CEE_CEQ to CEE_CLT_UN */
745 ceqops_op_map [STACK_MAX] = {
746 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * NOTE(review): sampled fragment of type_from_op.  Most case labels,
 * 'break's and the closing braces are in the gaps; the surviving lines
 * show the per-opcode-group pattern: look up the result STACK_* type
 * in the tables above, then bias ins->opcode by the matching
 * *_op_map entry to select the type-specific opcode.
 */
750 * Sets ins->type (the type on the eval stack) according to the
751 * type of the opcode and the arguments to it.
752 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
754 * FIXME: this function sets ins->type unconditionally in some cases, but
755 * it should set it to invalid for some types (a conv.x on an object)
758 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
760 switch (ins->opcode) {
/* Arithmetic binops (case labels elided by sampling). */
767 /* FIXME: check unverifiable args for STACK_MP */
768 ins->type = bin_num_table [src1->type] [src2->type];
769 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/rem, presumably). */
776 ins->type = bin_int_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type depends only on the shifted operand. */
782 ins->type = shift_table [src1->type] [src2->type];
783 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width. */
788 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
789 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
790 ins->opcode = OP_LCOMPARE;
791 else if (src1->type == STACK_R8)
792 ins->opcode = OP_FCOMPARE;
794 ins->opcode = OP_ICOMPARE;
796 case OP_ICOMPARE_IMM:
797 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
798 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
799 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / ceq-family. */
811 ins->opcode += beqops_op_map [src1->type];
814 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare ops accept only the '1' entries of the table. */
821 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
822 ins->opcode += ceqops_op_map [src1->type];
826 ins->type = neg_table [src1->type];
827 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are valid. */
830 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
831 ins->type = src1->type;
833 ins->type = STACK_INV;
834 ins->opcode += unops_op_map [ins->type];
840 ins->type = STACK_I4;
841 ins->opcode += unops_op_map [src1->type];
/* conv.r.un, presumably: unsigned int -> double. */
844 ins->type = STACK_R8;
845 switch (src1->type) {
848 ins->opcode = OP_ICONV_TO_R_UN;
851 ins->opcode = OP_LCONV_TO_R_UN;
855 case CEE_CONV_OVF_I1:
856 case CEE_CONV_OVF_U1:
857 case CEE_CONV_OVF_I2:
858 case CEE_CONV_OVF_U2:
859 case CEE_CONV_OVF_I4:
860 case CEE_CONV_OVF_U4:
861 ins->type = STACK_I4;
862 ins->opcode += ovf3ops_op_map [src1->type];
864 case CEE_CONV_OVF_I_UN:
865 case CEE_CONV_OVF_U_UN:
866 ins->type = STACK_PTR;
867 ins->opcode += ovf2ops_op_map [src1->type];
869 case CEE_CONV_OVF_I1_UN:
870 case CEE_CONV_OVF_I2_UN:
871 case CEE_CONV_OVF_I4_UN:
872 case CEE_CONV_OVF_U1_UN:
873 case CEE_CONV_OVF_U2_UN:
874 case CEE_CONV_OVF_U4_UN:
875 ins->type = STACK_I4;
876 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u / conv.i family: pointer-width result, opcode depends on
 * source width and on 32- vs 64-bit target. */
879 ins->type = STACK_PTR;
880 switch (src1->type) {
882 ins->opcode = OP_MOVE;
886 #if SIZEOF_VOID_P == 8
887 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_MOVE;
893 ins->opcode = OP_LCONV_TO_U;
896 ins->opcode = OP_FCONV_TO_U;
902 ins->type = STACK_I8;
903 ins->opcode += unops_op_map [src1->type];
905 case CEE_CONV_OVF_I8:
906 case CEE_CONV_OVF_U8:
907 ins->type = STACK_I8;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_U8_UN:
911 case CEE_CONV_OVF_I8_UN:
912 ins->type = STACK_I8;
913 ins->opcode += ovf2ops_op_map [src1->type];
917 ins->type = STACK_R8;
918 ins->opcode += unops_op_map [src1->type];
921 ins->type = STACK_R8;
925 ins->type = STACK_I4;
926 ins->opcode += ovfops_op_map [src1->type];
931 ins->type = STACK_PTR;
932 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 result allowed. */
940 ins->type = bin_num_table [src1->type] [src2->type];
941 ins->opcode += ovfops_op_map [src1->type];
942 if (ins->type == STACK_R8)
943 ins->type = STACK_INV;
945 case OP_LOAD_MEMBASE:
946 ins->type = STACK_PTR;
948 case OP_LOADI1_MEMBASE:
949 case OP_LOADU1_MEMBASE:
950 case OP_LOADI2_MEMBASE:
951 case OP_LOADU2_MEMBASE:
952 case OP_LOADI4_MEMBASE:
953 case OP_LOADU4_MEMBASE:
954 ins->type = STACK_PTR;
956 case OP_LOADI8_MEMBASE:
957 ins->type = STACK_I8;
959 case OP_LOADR4_MEMBASE:
960 case OP_LOADR8_MEMBASE:
961 ins->type = STACK_R8;
964 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a conservative klass. */
968 if (ins->type == STACK_MP)
969 ins->klass = mono_defaults.object_class;
/*
 * NOTE(review): sampled fragment.  Line 974 is the tail of a STACK_*
 * lookup table (its declaration is in a gap); check_values_to_signature
 * appears to validate argument eval-stack types against a method
 * signature, returning 0 on mismatch -- TODO confirm against the full
 * source, the return statements are all elided here.
 */
974 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
980 param_table [STACK_MAX] [STACK_MAX] = {
985 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
989 switch (args->type) {
999 for (i = 0; i < sig->param_count; ++i) {
1000 switch (args [i].type) {
/* Managed-pointer args require a byref parameter, and vice versa. */
1004 if (!sig->params [i]->byref)
1008 if (sig->params [i]->byref)
1010 switch (sig->params [i]->type) {
1011 case MONO_TYPE_CLASS:
1012 case MONO_TYPE_STRING:
1013 case MONO_TYPE_OBJECT:
1014 case MONO_TYPE_SZARRAY:
1015 case MONO_TYPE_ARRAY:
1022 if (sig->params [i]->byref)
1024 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1033 /*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * NOTE(review): sampled fragment -- braces and parts of the doc
 * comments are in the gaps.  Three lazily-created per-compile cached
 * variables: the current MonoDomain*, the AOT GOT address, and the
 * runtime-generic-context vtable variable.
 */
1041 * When we need a pointer to the current domain many times in a method, we
1042 * call mono_domain_get() once and we store the result in a local variable.
1043 * This function returns the variable that represents the MonoDomain*.
1045 inline static MonoInst *
1046 mono_get_domainvar (MonoCompile *cfg)
1048 if (!cfg->domainvar)
1049 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 return cfg->domainvar;
1054 * The got_var contains the address of the Global Offset Table when AOT
1057 inline static MonoInst *
1058 mono_get_got_var (MonoCompile *cfg)
/* Only meaningful on architectures that need an explicit GOT var, and
 * only when compiling AOT (the non-AOT early return is in a gap). */
1060 #ifdef MONO_ARCH_NEED_GOT_VAR
1061 if (!cfg->compile_aot)
1063 if (!cfg->got_var) {
1064 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1066 return cfg->got_var;
/* rgctx var: only valid under generic sharing; kept on the stack. */
1073 mono_get_vtable_var (MonoCompile *cfg)
1075 g_assert (cfg->generic_sharing_context);
1077 if (!cfg->rgctx_var) {
1078 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1079 /* force the var to be stack allocated */
1080 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1083 return cfg->rgctx_var;
/*
 * type_from_stack_type (fragment): inverse of type_to_eval_stack_type;
 * maps a STACK_* eval-stack type back to a MonoType* (the STACK_MP
 * case label before the this_arg return is in a sampling gap).
 */
1087 type_from_stack_type (MonoInst *ins) {
1088 switch (ins->type) {
1089 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1090 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1091 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1092 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1094 return &ins->klass->this_arg;
1095 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1096 case STACK_VTYPE: return &ins->klass->byval_arg;
1098 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type (fragment): MonoType -> STACK_* constant, after
 * stripping enum wrappers via mono_type_get_underlying_type.  The
 * return statements between the case labels are in the sampling gaps.
 */
1103 static G_GNUC_UNUSED int
1104 type_to_stack_type (MonoType *t)
1106 switch (mono_type_get_underlying_type (t)->type) {
1109 case MONO_TYPE_BOOLEAN:
1112 case MONO_TYPE_CHAR:
1119 case MONO_TYPE_FNPTR:
1121 case MONO_TYPE_CLASS:
1122 case MONO_TYPE_STRING:
1123 case MONO_TYPE_OBJECT:
1124 case MONO_TYPE_SZARRAY:
1125 case MONO_TYPE_ARRAY:
1133 case MONO_TYPE_VALUETYPE:
1134 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: value types vs references take different paths. */
1136 case MONO_TYPE_GENERICINST:
1137 if (mono_type_generic_inst_is_valuetype (t))
1143 g_assert_not_reached ();
/*
 * array_access_to_klass (fragment): maps a CEE_LDELEM_*/CEE_STELEM_*
 * opcode to the element MonoClass.  The case labels preceding most
 * returns are in the sampling gaps; only the LDELEM_REF/STELEM_REF
 * pair survives intact.
 */
1150 array_access_to_klass (int opcode)
1154 return mono_defaults.byte_class;
1156 return mono_defaults.uint16_class;
1159 return mono_defaults.int_class;
1162 return mono_defaults.sbyte_class;
1165 return mono_defaults.int16_class;
1168 return mono_defaults.int32_class;
1170 return mono_defaults.uint32_class;
1173 return mono_defaults.int64_class;
1176 return mono_defaults.single_class;
1179 return mono_defaults.double_class;
1180 case CEE_LDELEM_REF:
1181 case CEE_STELEM_REF:
1182 return mono_defaults.object_class;
1184 g_assert_not_reached ();
/*
 * NOTE(review): sampled fragment.  Returns a shared "interface"
 * temporary for eval-stack slot 'slot' with the stack type of 'ins':
 * cached in cfg->intvars keyed by (type, slot) for the simple types,
 * freshly created otherwise or when inlining deepened the stack.
 * Declarations of pos/vnum/res and the switch's case labels are in the
 * gaps.
 */
1190 * We try to share variables when possible
1193 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1198 /* inlining can result in deeper stacks */
1199 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1200 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Flattened (type, slot) index into the intvars cache. */
1202 pos = ins->type - 1 + slot * STACK_MAX;
1204 switch (ins->type) {
1211 if ((vnum = cfg->intvars [pos]))
1212 return cfg->varinfo [vnum];
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 cfg->intvars [pos] = res->inst_c0;
1217 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info (fragment): under AOT (and without a generic
 * context) records the image+token pair that produced 'key' so the AOT
 * compiler can re-resolve it later.  The hash table owns mempool-
 * allocated values; nothing is freed individually.
 */
1223 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1226 * Don't use this if a generic_context is set, since that means AOT can't
1227 * look up the method using just the image+token.
1229 if (cfg->compile_aot && !cfg->generic_context) {
1230 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1231 jump_info_token->image = image;
1232 jump_info_token->token = token;
1233 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * NOTE(review): sampled fragment -- braces, 'int i'/'bindex'
 * declarations, several 'continue'/'break' statements and parts of the
 * comments are in the gaps.  Code kept byte-identical.
 */
1238 * This function is called to handle items that are left on the evaluation stack
1239 * at basic block boundaries. What happens is that we save the values to local variables
1240 * and we reload them later when first entering the target basic block (with the
1241 * handle_loaded_temps () function).
1242 * A single joint point will use the same variables (stored in the array bb->out_stack or
1243 * bb->in_stack, if the basic block is before or after the joint point).
1245 * This function needs to be called _before_ emitting the last instruction of
1246 * the bb (i.e. before emitting a branch).
1247 * If the stack merge fails at a join point, cfg->unverifiable is set.
1250 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1253 MonoBasicBlock *bb = cfg->cbb;
1254 MonoBasicBlock *outb;
1255 MonoInst *inst, **locals;
1260 if (cfg->verbose_level > 3)
1261 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with a non-empty stack: pick/allocate the out_stack. */
1262 if (!bb->out_scount) {
1263 bb->out_scount = count;
1264 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1266 for (i = 0; i < bb->out_count; ++i) {
1267 outb = bb->out_bb [i];
1268 /* exception handlers are linked, but they should not be considered for stack args */
1269 if (outb->flags & BB_EXCEPTION_HANDLER)
1271 //printf (" %d", outb->block_num);
1272 if (outb->in_stack) {
1274 bb->out_stack = outb->in_stack;
1280 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1281 for (i = 0; i < count; ++i) {
1283 * try to reuse temps already allocated for this purpouse, if they occupy the same
1284 * stack slot and if they are of the same type.
1285 * This won't cause conflicts since if 'local' is used to
1286 * store one of the values in the in_stack of a bblock, then
1287 * the same variable will be used for the same outgoing stack
1289 * This doesn't work when inlining methods, since the bblocks
1290 * in the inlined methods do not inherit their in_stack from
1291 * the bblock they are inlined to. See bug #58863 for an
1294 if (cfg->inlined_method)
1295 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1297 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as each successor's in_stack, verifying
 * that join points agree on the stack depth. */
1302 for (i = 0; i < bb->out_count; ++i) {
1303 outb = bb->out_bb [i];
1304 /* exception handlers are linked, but they should not be considered for stack args */
1305 if (outb->flags & BB_EXCEPTION_HANDLER)
1307 if (outb->in_scount) {
1308 if (outb->in_scount != bb->out_scount) {
1309 cfg->unverifiable = TRUE;
1312 continue; /* check they are the same locals */
1314 outb->in_scount = count;
1315 outb->in_stack = bb->out_stack;
/* Spill the current eval stack into the shared temporaries and make
 * sp[] refer to the temps from here on. */
1318 locals = bb->out_stack;
1320 for (i = 0; i < count; ++i) {
1321 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1322 inst->cil_code = sp [i]->cil_code;
1323 sp [i] = locals [i];
1324 if (cfg->verbose_level > 3)
1325 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1329 * It is possible that the out bblocks already have in_stack assigned, and
1330 * the in_stacks differ. In this case, we will store to all the different
1337 /* Find a bblock which has a different in_stack */
1339 while (bindex < bb->out_count) {
1340 outb = bb->out_bb [bindex];
1341 /* exception handlers are linked, but they should not be considered for stack args */
1342 if (outb->flags & BB_EXCEPTION_HANDLER) {
1346 if (outb->in_stack != locals) {
1347 for (i = 0; i < count; ++i) {
1348 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1349 inst->cil_code = sp [i]->cil_code;
1350 sp [i] = locals [i];
1351 if (cfg->verbose_level > 3)
1352 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1354 locals = outb->in_stack;
1363 /* Emit code which loads interface_offsets [klass->interface_id]
1364 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Loads into INTF_REG the interface-offsets entry for KLASS, given the
 *   vtable address in VTABLE_REG.  The offsets array precedes the vtable, so
 *   the non-AOT path reads at a negative displacement from VTABLE_REG.
 * NOTE(review): the listing elides brace/else lines in this region.
 */
1367 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1369 if (cfg->compile_aot) {
/* AOT: interface_id is not known until load time, so materialize the
 * "adjusted" id via a patch (presumably pre-scaled to the byte displacement,
 * matching the non-AOT path below — TODO confirm) and add it to the vtable. */
1370 int ioffset_reg = alloc_preg (cfg);
1371 int iid_reg = alloc_preg (cfg);
1373 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1374 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant; index the table directly. */
1378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1383 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1384 * stored in "klass_reg" implements the interface "klass".
1387 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Tests the per-class interface bitmap: byte index is iid >> 3, bit is iid & 7. */
1389 int ibitmap_reg = alloc_preg (cfg);
1390 int ibitmap_byte_reg = alloc_preg (cfg);
1392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1394 if (cfg->compile_aot) {
/* AOT: the interface id is a patch, so both the byte index (iid >> 3) and
 * the bit mask (1 << (iid & 7)) must be computed at run time. */
1395 int iid_reg = alloc_preg (cfg);
1396 int shifted_iid_reg = alloc_preg (cfg);
1397 int ibitmap_byte_address_reg = alloc_preg (cfg);
1398 int masked_iid_reg = alloc_preg (cfg);
1399 int iid_one_bit_reg = alloc_preg (cfg);
1400 int iid_bit_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1406 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the bit mask fold into constants. */
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1416 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1417 * stored in "vtable_reg" implements the interface "klass".
1420 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Same bitmap test as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * read from the MonoVTable instead of the MonoClass. */
1422 int ibitmap_reg = alloc_preg (cfg);
1423 int ibitmap_byte_reg = alloc_preg (cfg);
1425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1427 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at run time. */
1428 int iid_reg = alloc_preg (cfg);
1429 int shifted_iid_reg = alloc_preg (cfg);
1430 int ibitmap_byte_address_reg = alloc_preg (cfg);
1431 int masked_iid_reg = alloc_preg (cfg);
1432 int iid_one_bit_reg = alloc_preg (cfg);
1433 int iid_bit_reg = alloc_preg (cfg);
1434 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1436 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1439 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1440 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1441 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant-fold the byte offset and the bit mask. */
1443 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1449 * Emit code which checks whether the interface id of @klass exceeds the
1450 * maximum interface id given in max_iid_reg; if it does, branch to
 * false_target when one is supplied, otherwise throw InvalidCastException.
1453 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1454 MonoBasicBlock *false_target)
1456 if (cfg->compile_aot) {
/* AOT: the interface id is a load-time patch, so compare against a register. */
1457 int iid_reg = alloc_preg (cfg);
1458 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1459 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* max_iid < iid (unsigned) means the class cannot implement the interface. */
1464 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1466 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1469 /* Same as above, but obtains max_iid from a vtable */
1471 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit, zero-extended), then delegate. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1480 /* Same as above, but obtains max_iid from a klass */
1482 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1483 MonoBasicBlock *false_target)
1485 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit, zero-extended), then delegate. */
1487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1488 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *   Emits an "is instance of non-interface class" test using the supertypes
 *   table: branch to TRUE_TARGET when supertypes [klass->idepth - 1] of the
 *   class in KLASS_REG equals KLASS, to FALSE_TARGET when the inheritance
 *   depth is too small to contain KLASS.
 */
1492 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1494 int idepth_reg = alloc_preg (cfg);
1495 int stypes_reg = alloc_preg (cfg);
1496 int stype = alloc_preg (cfg);
/* The idepth check is only needed past MONO_DEFAULT_SUPERTABLE_SIZE —
 * presumably supertypes tables always have at least that many entries
 * (TODO confirm against class initialization code). */
1498 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1501 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1503 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1505 if (cfg->compile_aot) {
/* AOT: the class pointer must come through a patch. */
1506 int const_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1508 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *   Interface cast check against the vtable in VTABLE_REG: first the max-iid
 *   range check, then the interface bitmap bit.  Nonzero bit branches to
 *   TRUE_TARGET when given; otherwise zero throws InvalidCastException.
 */
1516 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1518 int intf_reg = alloc_preg (cfg);
1520 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1521 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1526 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1530 * Variant of the above that takes a register to the class, not the vtable.
1533 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1535 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the interface id, then test the class's interface bitmap bit;
 * nonzero → TRUE_TARGET (when given), zero → InvalidCastException. */
1537 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1538 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *   Emits an exact-class-equality check: throws InvalidCastException when the
 *   MonoClass* in KLASS_REG is not KLASS.
 */
1547 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1549 if (cfg->compile_aot) {
/* AOT: the class pointer must be materialized through a patch. */
1550 int const_reg = alloc_preg (cfg);
1551 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1552 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* JIT: compare against the raw class pointer as an immediate. */
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1556 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *   Like mini_emit_class_check, but instead of throwing, branches to TARGET
 *   with the caller-supplied BRANCH_OP on the comparison result.
 */
1560 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1562 if (cfg->compile_aot) {
1563 int const_reg = alloc_preg (cfg);
1564 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *   Emits a castclass check for the class in KLASS_REG against KLASS,
 *   throwing InvalidCastException on failure.  Handles array classes
 *   (rank/element-class checks, recursing for arrays of arrays) and plain
 *   classes (supertypes-table check).  OBJ_REG may be -1 to skip the
 *   vector/bounds check (see the recursive call below).
 * NOTE(review): the listing elides the if/else lines separating the array
 * path from the plain-class path at the bottom — confirm against the full
 * source before relying on exact control flow.
 */
1573 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1576 int rank_reg = alloc_preg (cfg);
1577 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly. */
1579 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1581 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1582 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch: the enum/object special cases below mirror the
 * runtime's array covariance rules for enums and their underlying types. */
1584 if (klass->cast_class == mono_defaults.object_class) {
1585 int parent_reg = alloc_preg (cfg);
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1587 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1588 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1589 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1590 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1591 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1592 } else if (klass->cast_class == mono_defaults.enum_class) {
1593 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1594 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface element type: NULL targets make failures throw. */
1595 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1597 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1598 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1601 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1602 /* Check that the object is a vector too */
1603 int bounds_reg = alloc_preg (cfg);
1604 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1606 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Plain-class path: verify klass appears at its depth in the supertypes table. */
1609 int idepth_reg = alloc_preg (cfg);
1610 int stypes_reg = alloc_preg (cfg);
1611 int stype = alloc_preg (cfg);
1613 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1614 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1616 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1620 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *   Emits inline stores setting SIZE bytes at DESTREG+OFFSET to VAL,
 *   respecting ALIGN.  Small aligned sizes use a single immediate store;
 *   larger sizes store a value register in descending power-of-two chunks.
 *   The visible assert restricts VAL to 0 on at least one path.
 * NOTE(review): loop/brace lines are elided in this listing; the chunked
 * stores below are presumably inside size-decrementing loops — confirm.
 */
1625 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1629 g_assert (val == 0);
/* Fast path: the whole region fits one naturally-aligned immediate store. */
1634 if ((size <= 4) && (size <= align)) {
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1640 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1643 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1645 #if SIZEOF_VOID_P == 8
1647 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize VAL in a register sized to the native pointer. */
1653 val_reg = alloc_preg (cfg);
1655 if (sizeof (gpointer) == 8)
1656 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1658 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1661 /* This could be optimized further if necessary */
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1670 #if !NO_UNALIGNED_ACCESS
1671 if (sizeof (gpointer) == 8) {
1673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1702 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy2:
 *   Emits an inline copy of SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 *   honouring ALIGN: load/store pairs in descending power-of-two widths,
 *   falling back to byte copies for unaligned or trailing data.
 * NOTE(review): loop/brace lines are elided in this listing; each load/store
 * pair below is presumably inside a size-decrementing loop — confirm.
 */
1705 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1713 /* This could be optimized further if necessary */
/* Byte-wise copy (unaligned / remainder path). */
1715 cur_reg = alloc_preg (cfg);
1716 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1724 #if !NO_UNALIGNED_ACCESS
1725 if (sizeof (gpointer) == 8) {
/* 8-byte chunks on 64-bit targets that tolerate unaligned access. */
1727 cur_reg = alloc_preg (cfg);
1728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-, 2- and 1-byte chunks for the remaining tail. */
1738 cur_reg = alloc_preg (cfg);
1739 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1740 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1746 cur_reg = alloc_preg (cfg);
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1748 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Maps the call's return TYPE to the matching call opcode family
 *   (plain / _REG for calli / VIRT for virtual calls): OP_CALL for
 *   pointer-sized results, OP_VOIDCALL, OP_LCALL for I8, OP_FCALL for R8,
 *   OP_VCALL for value types, after unwrapping enums and generic instances.
 * NOTE(review): byref handling and loop/goto lines are elided in this
 * listing — the enum and GENERICINST cases presumably re-dispatch on the
 * unwrapped type (confirm against the full source).
 */
1766 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1769 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic parameters to their concrete basic type first. */
1772 type = mini_get_basic_type_from_generic (gsctx, type);
1773 switch (type->type) {
1774 case MONO_TYPE_VOID:
1775 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1778 case MONO_TYPE_BOOLEAN:
1781 case MONO_TYPE_CHAR:
1784 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1788 case MONO_TYPE_FNPTR:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1790 case MONO_TYPE_CLASS:
1791 case MONO_TYPE_STRING:
1792 case MONO_TYPE_OBJECT:
1793 case MONO_TYPE_SZARRAY:
1794 case MONO_TYPE_ARRAY:
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1798 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1801 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1802 case MONO_TYPE_VALUETYPE:
1803 if (type->data.klass->enumtype) {
/* Enums return as their underlying integral type. */
1804 type = type->data.klass->enum_basetype;
1807 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1808 case MONO_TYPE_TYPEDBYREF:
1809 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1810 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the open container class's type. */
1811 type = &type->data.generic_class->container_class->byval_arg;
1814 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1820 * target_type_is_incompatible:
1821 * @cfg: MonoCompile context
1823 * Check that the item @arg on the evaluation stack can be stored
1824 * in the target type (can be a local, or field, etc).
1825 * The cfg arg can be used to check if we need verification or just
1828 * Returns: non-0 value if arg can't be stored on a target.
1831 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1833 MonoType *simple_type;
/* Byref targets only accept managed pointers (of the pointed-to class)
 * or native pointers. */
1836 if (target->byref) {
1837 /* FIXME: check that the pointed to types match */
1838 if (arg->type == STACK_MP)
1839 return arg->klass != mono_class_from_mono_type (target);
1840 if (arg->type == STACK_PTR)
/* Unwrap enums etc. before dispatching on the element type. */
1845 simple_type = mono_type_get_underlying_type (target);
1846 switch (simple_type->type) {
1847 case MONO_TYPE_VOID:
1851 case MONO_TYPE_BOOLEAN:
1854 case MONO_TYPE_CHAR:
1857 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1861 /* STACK_MP is needed when setting pinned locals */
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1867 case MONO_TYPE_FNPTR:
1868 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1871 case MONO_TYPE_CLASS:
1872 case MONO_TYPE_STRING:
1873 case MONO_TYPE_OBJECT:
1874 case MONO_TYPE_SZARRAY:
1875 case MONO_TYPE_ARRAY:
1876 if (arg->type != STACK_OBJ)
1878 /* FIXME: check type compatibility */
1882 if (arg->type != STACK_I8)
1887 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack kind. */
1890 case MONO_TYPE_VALUETYPE:
1891 if (arg->type != STACK_VTYPE)
1893 klass = mono_class_from_mono_type (simple_type);
1894 if (klass != arg->klass)
1897 case MONO_TYPE_TYPEDBYREF:
1898 if (arg->type != STACK_VTYPE)
1900 klass = mono_class_from_mono_type (simple_type);
1901 if (klass != arg->klass)
1904 case MONO_TYPE_GENERICINST:
1905 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1906 if (arg->type != STACK_VTYPE)
1908 klass = mono_class_from_mono_type (simple_type);
1909 if (klass != arg->klass)
1913 if (arg->type != STACK_OBJ)
1915 /* FIXME: check type compatibility */
/* Open generic parameters only occur under generic sharing. */
1919 case MONO_TYPE_MVAR:
1920 /* FIXME: all the arguments must be references for now,
1921 * later look inside cfg and see if the arg num is
1922 * really a reference
1924 g_assert (cfg->generic_sharing_context);
1925 if (arg->type != STACK_OBJ)
1929 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1935 * Prepare arguments for passing to a function call.
1936 * Return a non-zero value if the arguments can't be passed to the given
1938 * The type checks are not yet complete and some conversions may need
1939 * casts on 32 or 64 bit architectures.
1941 * FIXME: implement this using target_type_is_incompatible ()
1944 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1946 MonoType *simple_type;
/* The implicit 'this' argument must be an object, managed or native pointer. */
1950 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1954 for (i = 0; i < sig->param_count; ++i) {
1955 if (sig->params [i]->byref) {
1956 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Resolve shared generic parameters before dispatching on the type. */
1960 simple_type = sig->params [i];
1961 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1963 switch (simple_type->type) {
1964 case MONO_TYPE_VOID:
1969 case MONO_TYPE_BOOLEAN:
1972 case MONO_TYPE_CHAR:
1975 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1981 case MONO_TYPE_FNPTR:
1982 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (args [i]->type != STACK_OBJ)
1995 if (args [i]->type != STACK_I8)
2000 if (args [i]->type != STACK_R8)
2003 case MONO_TYPE_VALUETYPE:
2004 if (simple_type->data.klass->enumtype) {
/* Enums are checked as their underlying integral type. */
2005 simple_type = simple_type->data.klass->enum_basetype;
2008 if (args [i]->type != STACK_VTYPE)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (args [i]->type != STACK_VTYPE)
2015 case MONO_TYPE_GENERICINST:
/* Re-check against the open container class's type. */
2016 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2020 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Maps an OP_*CALLVIRT opcode to its direct-call equivalent.
 * NOTE(review): the switch/case lines are elided in this listing; only the
 * OP_VOIDCALLVIRT label and the unreachable default are visible.
 */
2028 callvirt_to_call (int opcode)
2033 case OP_VOIDCALLVIRT:
2042 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Maps an OP_*CALLVIRT opcode to the corresponding *_MEMBASE opcode used
 *   when the call target is loaded from [basereg + offset] (vtable slot).
 * NOTE(review): the switch line and several case labels are elided in this
 * listing; the visible returns show the VOID/F/L/V variants.
 */
2049 callvirt_to_call_membase (int opcode)
2053 return OP_CALL_MEMBASE;
2054 case OP_VOIDCALLVIRT:
2055 return OP_VOIDCALL_MEMBASE;
2057 return OP_FCALL_MEMBASE;
2059 return OP_LCALL_MEMBASE;
2061 return OP_VCALL_MEMBASE;
2063 g_assert_not_reached ();
2069 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Passes the IMT (interface method table) key for the call: either the
 *   supplied IMT_ARG or the method itself.  On architectures with a
 *   dedicated IMT register it moves the value there; otherwise it defers
 *   to mono_arch_emit_imt_argument ().
 */
2071 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2073 #ifdef MONO_ARCH_IMT_REG
2074 int method_reg = alloc_preg (cfg);
2077 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2078 } else if (cfg->compile_aot) {
/* AOT: the method pointer comes through a METHODCONST patch. */
2079 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2082 MONO_INST_NEW (cfg, ins, OP_PCONST);
2083 ins->inst_p0 = call->method;
2084 ins->dreg = method_reg;
2085 MONO_ADD_INS (cfg->cbb, ins);
2088 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2090 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocates a MonoJumpInfo from MP and fills in its ip/type/target fields.
 *   Memory is owned by the mempool; callers must not free it.
 */
2095 static MonoJumpInfo *
2096 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2098 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2102 ji->data.target = target;
2107 inline static MonoInst*
2108 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *   Creates the MonoCallInst for a call with signature SIG and arguments
 *   ARGS, choosing the opcode via ret_type_to_call_opcode (CALLI/VIRTUAL
 *   select the _REG/VIRT families).  Handles valuetype returns by allocating
 *   a temp and an OP_OUTARG_VTRETADDR, converts R4 arguments on soft-float
 *   targets, and lets the backend lower the out-args.  The call is NOT added
 *   to a basic block here — callers do that.
 */
2110 inline static MonoCallInst *
2111 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2112 MonoInst **args, int calli, int virtual)
2115 #ifdef MONO_ARCH_SOFT_FLOAT
2119 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2122 call->signature = sig;
2124 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: route the result through a local temp whose address is
 * passed via OP_OUTARG_VTRETADDR (see comment block below). */
2126 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2127 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2130 temp->backend.is_pinvoke = sig->pinvoke;
2133 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2134 * address of return value to increase optimization opportunities.
2135 * Before vtype decomposition, the dreg of the call ins itself represents the
2136 * fact the call modifies the return value. After decomposition, the call will
2137 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2138 * will be transformed into an LDADDR.
2140 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2141 loada->dreg = alloc_preg (cfg);
2142 loada->inst_p0 = temp;
2143 /* We reference the call too since call->dreg could change during optimization */
2144 loada->inst_p1 = call;
2145 MONO_ADD_INS (cfg->cbb, loada);
2147 call->inst.dreg = temp->dreg;
2149 call->vret_var = loada;
2150 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2151 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2153 #ifdef MONO_ARCH_SOFT_FLOAT
2155 * If the call has a float argument, we would need to do an r8->r4 conversion using
2156 * an icall, but that cannot be done during the call sequence since it would clobber
2157 * the call registers + the stack. So we do it before emitting the call.
2159 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2161 MonoInst *in = call->args [i];
2163 if (i >= sig->hasthis)
2164 t = sig->params [i - sig->hasthis];
2166 t = &mono_defaults.int_class->byval_arg;
2167 t = mono_type_get_underlying_type (t);
2169 if (!t->byref && t->type == MONO_TYPE_R4) {
2170 MonoInst *iargs [1];
2174 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2176 /* The result will be in an int vreg */
2177 call->args [i] = conv;
/* Backend lowers the arguments into out-arg instructions. */
2182 mono_arch_emit_call (cfg, call);
2184 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2185 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *   Emits an indirect call through the function pointer in ADDR->dreg with
 *   signature SIG; adds the call to the current bblock and returns it.
 */
2190 inline static MonoInst*
2191 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2193 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2195 call->inst.sreg1 = addr->dreg;
2197 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2199 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *   Like mono_emit_calli, but additionally passes the runtime generic
 *   context in RGCTX_ARG via the architecture's dedicated RGCTX register.
 *   The visible g_assert_not_reached suggests architectures without
 *   MONO_ARCH_RGCTX_REG are unsupported here (#else path — elided lines).
 */
2202 inline static MonoInst*
2203 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2205 #ifdef MONO_ARCH_RGCTX_REG
2210 rgctx_reg = mono_alloc_preg (cfg);
2211 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2213 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2215 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2216 cfg->uses_rgctx_reg = TRUE;
2218 return (MonoInst*)call;
2220 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *   Emits a call to METHOD with signature SIG and arguments ARGS.  THIS
 *   non-NULL selects a virtual call; IMT_ARG supplies an explicit IMT key
 *   for interface dispatch.  Devirtualizes when safe (non-virtual, final,
 *   or sealed-class methods), special-cases delegate Invoke and remoting
 *   proxies, and otherwise emits a vtable/IMT-slot indirect call.
 *   Returns the call instruction, already added to the current bblock.
 */
2226 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2227 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2229 gboolean virtual = this != NULL;
2230 gboolean enable_for_aot = TRUE;
2233 if (method->string_ctor) {
2234 /* Create the real signature */
2235 /* FIXME: Cache these */
2236 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2237 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2242 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Remoting: route through a wrapper that checks for a transparent proxy. */
2244 if (this && sig->hasthis &&
2245 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2246 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2247 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2249 call->method = method;
2251 call->inst.flags |= MONO_INST_HAS_METHOD;
2252 call->inst.inst_left = this;
2255 int vtable_reg, slot_reg, this_reg;
2257 this_reg = this->dreg;
/* Devirtualize: non-virtual, or final outside a remoting wrapper — just
 * null-check 'this' and call directly. */
2259 if ((!cfg->compile_aot || enable_for_aot) &&
2260 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2261 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2262 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2264 * the method is not virtual, we just need to ensure this is not null
2265 * and then we can call the method directly.
2267 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2268 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2271 if (!method->string_ctor) {
2272 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2273 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2274 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2277 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2279 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2281 return (MonoInst*)call;
2284 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2285 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2286 /* Make a call to delegate->invoke_impl */
2287 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2288 call->inst.inst_basereg = this_reg;
2289 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2290 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2292 return (MonoInst*)call;
/* Devirtualize virtual-but-effectively-final methods (final method or
 * sealed class) after a null check. */
2296 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2297 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2298 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2300 * the method is virtual, but we can statically dispatch since either
2301 * it's class or the method itself are sealed.
2302 * But first we need to ensure it's not a null reference.
2304 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2305 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2306 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2308 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2309 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2311 return (MonoInst*)call;
/* True virtual dispatch: call through a vtable or IMT slot. */
2314 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2316 /* Initialize method->slot */
2317 mono_class_setup_vtable (method->klass);
2319 vtable_reg = alloc_preg (cfg);
2320 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2321 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2323 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable. */
2325 guint32 imt_slot = mono_method_get_imt_slot (method);
2326 emit_imt_argument (cfg, call, imt_arg);
2327 slot_reg = vtable_reg;
2328 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT interface dispatch: index the interface-offsets table. */
2331 if (slot_reg == -1) {
2332 slot_reg = alloc_preg (cfg);
2333 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2334 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: plain vtable slot. */
2337 slot_reg = vtable_reg;
2338 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2339 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2341 g_assert (mono_method_signature (method)->generic_param_count);
2342 emit_imt_argument (cfg, call, imt_arg);
2346 call->inst.sreg1 = slot_reg;
2347 call->virtual = TRUE;
2350 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2352 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *   Like mono_emit_method_call_full, but additionally passes VTABLE_ARG
 *   (the runtime generic context) through the architecture's RGCTX register.
 */
2356 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2357 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2364 #ifdef MONO_ARCH_RGCTX_REG
/* Stash the rgctx value before the call is built so it survives out-arg lowering. */
2365 rgctx_reg = mono_alloc_preg (cfg);
2366 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2371 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2373 call = (MonoCallInst*)ins;
2375 #ifdef MONO_ARCH_RGCTX_REG
2376 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2377 cfg->uses_rgctx_reg = TRUE;
/* Convenience wrapper: call METHOD with its own signature and no IMT arg. */
2386 static inline MonoInst*
2387 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2389 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emits a direct call to the native function FUNC with signature SIG;
 *   adds it to the current bblock and returns the call instruction.
 */
2393 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2400 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2403 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2405 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Looks up the registered JIT icall for FUNC and emits a native call to
 *   its wrapper with the icall's signature.
 */
2408 inline static MonoInst*
2409 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2411 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2415 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2419 * mono_emit_abs_call:
2421 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2423 inline static MonoInst*
2424 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2425 MonoMethodSignature *sig, MonoInst **args)
2427 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2431 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji so the ABS patch resolver can map the fake address back. */
2434 if (cfg->abs_patches == NULL)
2435 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2436 g_hash_table_insert (cfg->abs_patches, ji, ji);
2437 ins = mono_emit_native_call (cfg, ji, sig, args);
2438 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * get_memcpy_method:
 *   Returns the cached corlib String.memcpy(dest, src, n) helper; aborts if
 *   the installed corlib does not provide it.
 */
2443 get_memcpy_method (void)
2445 static MonoMethod *memcpy_method = NULL;
2446 if (!memcpy_method) {
2447 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2449 g_error ("Old corlib found. Install a new one");
2451 return memcpy_method;
2455 * Emit code to copy a valuetype of type @klass whose address is stored in
2456 * @src->dreg to memory whose address is stored at @dest->dreg.
2459 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2461 MonoInst *iargs [3];
2464 MonoMethod *memcpy_method;
2468 * This check breaks with spilled vars... need to handle it during verification anyway.
2469 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled (unmanaged) layout size. */
2473 n = mono_class_native_size (klass, &align);
2475 n = mono_class_value_size (klass, &align);
/* Small copies are inlined; larger ones call the corlib memcpy helper. */
2477 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2478 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2479 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2483 EMIT_NEW_ICONST (cfg, iargs [2], n);
2485 memcpy_method = get_memcpy_method ();
2486 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return the corlib-provided String::memset(3) helper, caching it in a
 * static on first use.  Aborts if the installed corlib does not have it.
 */
2491 get_memset_method (void)
2493 static MonoMethod *memset_method = NULL;
2494 if (!memset_method) {
2495 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2497 g_error ("Old corlib found. Install a new one");
2499 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize the valuetype @klass at the address held in
 * @dest->dreg (IL `initobj').  Small types are cleared inline; larger ones
 * go through the corlib memset helper with (dest, 0, n).
 */
2503 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2505 MonoInst *iargs [3];
2508 MonoMethod *memset_method;
2510 /* FIXME: Optimize this for the case when dest is an LDADDR */
2512 mono_class_init (klass);
2513 n = mono_class_value_size (klass, &align);
/* Inline clear for up to 5 pointer-sized words. */
2515 if (n <= sizeof (gpointer) * 5) {
2516 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2519 memset_method = get_memset_method ();
2521 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2522 EMIT_NEW_ICONST (cfg, iargs [2], n);
2523 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that materializes the runtime generic context for @method.
 * The source of the context depends on how the method is shared:
 *   - methods with a method-level generic context load the MRGCTX from the
 *     hidden vtable/rgctx variable;
 *   - static or valuetype methods load the class vtable from that variable
 *     (indirecting through the MRGCTX if one is present);
 *   - otherwise the vtable is loaded from the `this' argument.
 * Only valid when compiling with generic sharing enabled.
 */
2528 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2530 MonoInst *this = NULL;
2532 g_assert (cfg->generic_sharing_context);
/* Non-static reference-type methods without a method context: `this' carries the vtable. */
2534 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2535 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2536 !method->klass->valuetype)
2537 EMIT_NEW_ARGLOAD (cfg, this, 0);
2539 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2540 MonoInst *mrgctx_loc, *mrgctx_var;
/* Method context implies an inflated method with a method_inst. */
2543 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2545 mrgctx_loc = mono_get_vtable_var (cfg);
2546 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2549 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2550 MonoInst *vtable_loc, *vtable_var;
2554 vtable_loc = mono_get_vtable_var (cfg);
2555 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The hidden variable actually holds an MRGCTX here; fetch its class vtable field. */
2557 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2558 MonoInst *mrgctx_var = vtable_var;
2561 vtable_reg = alloc_preg (cfg);
2562 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2563 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable out of the `this' object. */
2569 int vtable_reg, res_reg;
2571 vtable_reg = alloc_preg (cfg);
2572 res_reg = alloc_preg (cfg);
2573 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (on @mp) and fill an RGCTX-entry descriptor: the method it
 * belongs to, whether the lookup goes through an MRGCTX, an embedded
 * MonoJumpInfo describing the data being looked up, and the slot info type.
 */
2578 static MonoJumpInfoRgctxEntry *
2579 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2581 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2582 res->method = method;
2583 res->in_mrgctx = in_mrgctx;
2584 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2585 res->data->type = patch_type;
2586 res->data->data.target = patch_data;
2587 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy RGCTX-fetch trampoline that resolves @entry
 * using the runtime generic context instruction @rgctx as its argument.
 */
2592 static inline MonoInst*
2593 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2595 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit a lazy RGCTX fetch that yields, for @klass, the data selected by
 * @rgctx_type (e.g. its vtable or MonoClass) from the current method's
 * runtime generic context.
 */
2599 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2600 MonoClass *klass, int rgctx_type)
2602 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2603 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2605 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 * Same as emit_get_rgctx_klass, but the looked-up datum is derived from
 * the method constant @cmethod (e.g. its compiled-code address).
 */
2609 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2610 MonoMethod *cmethod, int rgctx_type)
2612 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2613 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2615 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Same as emit_get_rgctx_klass, but the looked-up datum is derived from
 * @field (a MonoClassField).
 */
2619 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2620 MonoClassField *field, int rgctx_type)
2622 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2623 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2625 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 * Emit a runtime check that object @obj is exactly an instance of
 * @array_class, throwing ArrayTypeMismatchException otherwise (used for
 * covariant array stores).  The comparison strategy depends on the
 * compilation mode: MONO_OPT_SHARED compares MonoClass pointers, shared
 * generics fetch the vtable from the RGCTX, and plain/AOT code compares
 * vtable pointers directly.
 */
2629 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2631 int vtable_reg = alloc_preg (cfg);
2632 int context_used = 0;
2634 if (cfg->generic_sharing_context)
2635 context_used = mono_class_check_context_used (array_class);
2637 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Domain-shared code cannot bake in a vtable pointer; compare the MonoClass instead. */
2639 if (cfg->opt & MONO_OPT_SHARED) {
2640 int class_reg = alloc_preg (cfg);
2641 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2642 if (cfg->compile_aot) {
2643 int klass_reg = alloc_preg (cfg);
2644 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: obtain the expected vtable through the RGCTX at run time. */
2649 } else if (context_used) {
2650 MonoInst *vtable_ins;
2652 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2653 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2655 if (cfg->compile_aot) {
2656 int vt_reg = alloc_preg (cfg);
2657 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2664 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 * When --debug=casts is enabled, emit code that records the source class
 * (read from the object at @obj_reg) and the target class @klass into the
 * thread's MonoJitTlsData, so a failing cast can report both types.
 * No-op otherwise.
 */
2668 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2670 if (mini_get_debug_options ()->better_cast_details) {
2671 int to_klass_reg = alloc_preg (cfg);
2672 int vtable_reg = alloc_preg (cfg);
2673 int klass_reg = alloc_preg (cfg);
2674 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is arch-specific; without it the feature cannot work. */
2677 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2681 MONO_ADD_INS (cfg->cbb, tls_get);
2682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2686 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 * Counterpart of save_cast_details: after a cast succeeds, clear the
 * recorded details so stale data is not reported by a later failure.
 */
2692 reset_cast_details (MonoCompile *cfg)
2694 /* Reset the variables holding the cast details */
2695 if (mini_get_debug_options ()->better_cast_details) {
2696 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2698 MONO_ADD_INS (cfg->cbb, tls_get);
2699 /* It is enough to reset the from field */
2700 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2705 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2706 * generic code is generated.
2709 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing is delegated to the managed Nullable<T>.Unbox helper. */
2711 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2714 MonoInst *rgctx, *addr;
2716 /* FIXME: What if the class is shared? We might not
2717 have to get the address of the method from the
/* Shared path: resolve the helper's code address and rgctx through the RGCTX,
 * then emit an indirect call. */
2719 addr = emit_get_rgctx_method (cfg, context_used, method,
2720 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2722 rgctx = emit_get_rgctx (cfg, method, context_used);
2724 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: a direct managed call suffices. */
2726 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 * Emit the IL `unbox' sequence for @klass on the object at sp [0]:
 * verify the object is a non-array instance whose element class matches
 * @klass (throwing InvalidCastException otherwise), then produce the
 * address of the boxed payload (object pointer + sizeof (MonoObject)).
 */
2731 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2735 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2736 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2737 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2738 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2740 obj_reg = sp [0]->dreg;
2741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2744 /* FIXME: generics */
2745 g_assert (klass->rank == 0);
/* Arrays can never be unboxed: rank must be 0. */
2748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generics: the expected element class comes from the RGCTX at run time. */
2755 MonoInst *element_class;
2757 /* This assertion is from the unboxcast insn */
2758 g_assert (klass->rank == 0);
2760 element_class = emit_get_rgctx_klass (cfg, context_used,
2761 klass->element_class, MONO_RGCTX_INFO_KLASS);
2763 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2764 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: record cast details for diagnostics, then do a direct class check. */
2766 save_cast_details (cfg, klass->element_class, obj_reg);
2767 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2768 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the boxed value. */
2771 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2772 MONO_ADD_INS (cfg->cbb, add);
2773 add->type = STACK_MP;
/*
 * handle_alloc:
 * Emit IR that allocates a new object of @klass, choosing among:
 * mono_object_new (domain-shared code), a corlib-token helper (AOT
 * out-of-line paths, to avoid relocations), a GC managed allocator, or
 * the class's specialized allocation function.  @for_box selects the
 * boxing variant where the GC distinguishes it.
 */
2780 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2782 MonoInst *iargs [2];
2785 if (cfg->opt & MONO_OPT_SHARED) {
2786 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2787 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2789 alloc_ftn = mono_object_new;
2790 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2791 /* This happens often in argument checking code, eg. throw new FooException... */
2792 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2793 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2794 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2796 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2797 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's inlined managed allocator when one exists for this vtable. */
2800 if (managed_alloc) {
2801 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2802 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2804 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-words ("lw") as first arg. */
2806 guint32 lw = vtable->klass->instance_size;
2807 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2808 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2809 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2812 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2816 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 * Variant of handle_alloc for shared generic code: the vtable is not known
 * at compile time and is instead supplied by @data_inst (typically an
 * RGCTX fetch), so allocation goes through mono_object_new /
 * mono_object_new_specific with that runtime value.
 */
2820 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2823 MonoInst *iargs [2];
2824 MonoMethod *managed_alloc = NULL;
2828 FIXME: we cannot get managed_alloc here because we can't get
2829 the class's vtable (because it's not a closed class)
2831 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2832 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2835 if (cfg->opt & MONO_OPT_SHARED) {
2836 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2837 iargs [1] = data_inst;
2838 alloc_ftn = mono_object_new;
/* NOTE(review): managed_alloc is initialized to NULL above, so this branch
 * looks unreachable in the current form (see the FIXME) — confirm upstream. */
2840 if (managed_alloc) {
2841 iargs [0] = data_inst;
2842 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2845 iargs [0] = data_inst;
2846 alloc_ftn = mono_object_new_specific;
2849 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 * Emit the IL `box' sequence for @val of type @klass: Nullable<T> goes
 * through the managed Nullable<T>.Box helper; otherwise allocate the
 * object and store the value past the MonoObject header.
 */
2853 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2855 MonoInst *alloc, *ins;
2857 if (mono_class_is_nullable (klass)) {
2858 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2859 return mono_emit_method_call (cfg, method, &val, NULL);
2862 alloc = handle_alloc (cfg, klass, TRUE);
2864 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 * Shared-generic variant of handle_box: the Nullable<T>.Box helper address
 * is fetched from the RGCTX and called indirectly, and allocation uses the
 * runtime vtable supplied by @data_inst.
 */
2870 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2872 MonoInst *alloc, *ins;
2874 if (mono_class_is_nullable (klass)) {
2875 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2876 /* FIXME: What if the class is shared? We might not
2877 have to get the method address from the RGCTX. */
2878 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2879 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2880 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2882 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2884 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value payload just past the MonoObject header. */
2886 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 * Emit the IL `castclass' check of @src against @klass.  null passes
 * unchanged; interfaces use the interface-cast helper; sealed non-array
 * classes are checked with a single vtable-pointer compare; everything
 * else falls back to the generic class-hierarchy walk.  Throws
 * InvalidCastException on mismatch.
 */
2893 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2895 MonoBasicBlock *is_null_bb;
2896 int obj_reg = src->dreg;
2897 int vtable_reg = alloc_preg (cfg);
2899 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always satisfies castclass. */
2901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2904 save_cast_details (cfg, klass, obj_reg);
2906 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2907 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2908 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2910 int klass_reg = alloc_preg (cfg);
2912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class fast path: exact vtable (or MonoClass) pointer comparison. */
2914 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2915 /* the remoting code is broken, access the class for now */
2917 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2920 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2923 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
2925 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2926 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2930 MONO_START_BB (cfg, is_null_bb);
2932 reset_cast_details (cfg);
/*
 * handle_isinst:
 * Emit the IL `isinst' test of @src against @klass.  The result register
 * starts as a copy of the object (so null maps to null and a successful
 * test yields the object itself) and is overwritten with 0 on failure.
 * Branch targets: is_null_bb = success (keep the copied object),
 * false_bb = failure (store 0), end_bb = join.
 */
2938 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2941 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2942 int obj_reg = src->dreg;
2943 int vtable_reg = alloc_preg (cfg);
2944 int res_reg = alloc_preg (cfg);
2946 NEW_BBLOCK (cfg, is_null_bb);
2947 NEW_BBLOCK (cfg, false_bb);
2948 NEW_BBLOCK (cfg, end_bb);
2950 /* Do the assignment at the beginning, so the other assignment can be if converted */
2951 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2952 ins->type = STACK_OBJ;
/* null is not an instance of anything: fall through to the success block,
 * which leaves the (null) copy in the result. */
2955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2958 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2959 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2960 /* the is_null_bb target simply copies the input register to the output */
2961 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2963 int klass_reg = alloc_preg (cfg);
2965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: ranks must match, then the element (cast) classes are compared,
 * with special handling for object/enum element types. */
2968 int rank_reg = alloc_preg (cfg);
2969 int eclass_reg = alloc_preg (cfg);
2971 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2972 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2973 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2974 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2976 if (klass->cast_class == mono_defaults.object_class) {
2977 int parent_reg = alloc_preg (cfg);
2978 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2979 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2980 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2982 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2983 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2984 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2986 } else if (klass->cast_class == mono_defaults.enum_class) {
2987 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2988 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2989 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2990 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2992 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2993 /* Check that the object is a vector too */
2994 int bounds_reg = alloc_preg (cfg);
2995 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2997 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3000 /* the is_null_bb target simply copies the input register to the output */
3001 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3003 } else if (mono_class_is_nullable (klass)) {
3004 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3005 /* the is_null_bb target simply copies the input register to the output */
3006 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path: a single pointer comparison decides the test. */
3008 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3009 /* the remoting code is broken, access the class for now */
3011 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3012 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3014 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3015 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3017 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3020 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3021 /* the is_null_bb target simply copies the input register to the output */
3022 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure: overwrite the result with 0 (null). */
3027 MONO_START_BB (cfg, false_bb);
3029 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
3030 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3032 MONO_START_BB (cfg, is_null_bb);
3034 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Remoting-aware variant of isinst used by wrapper code.  Produces an
 * integer rather than an object reference; see the result encoding below.
 * Transparent proxies whose type cannot be decided locally yield 2 so the
 * caller can fall back to a remote check.
 */
3040 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3042 /* This opcode takes as input an object reference and a class, and returns:
3043 0) if the object is an instance of the class,
3044 1) if the object is not instance of the class,
3045 2) if the object is a proxy whose type cannot be determined */
3048 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3049 int obj_reg = src->dreg;
3050 int dreg = alloc_ireg (cfg);
3052 int klass_reg = alloc_preg (cfg);
3054 NEW_BBLOCK (cfg, true_bb);
3055 NEW_BBLOCK (cfg, false_bb);
3056 NEW_BBLOCK (cfg, false2_bb);
3057 NEW_BBLOCK (cfg, end_bb);
3058 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3063 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3064 NEW_BBLOCK (cfg, interface_fail_bb);
3066 tmp_reg = alloc_preg (cfg);
3067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3068 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: if the object is a transparent proxy with custom
 * type info, report "undecidable" (2), otherwise plain failure (1). */
3069 MONO_START_BB (cfg, interface_fail_bb);
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3072 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3074 tmp_reg = alloc_preg (cfg);
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3077 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect a transparent proxy first. */
3079 tmp_reg = alloc_preg (cfg);
3080 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3083 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3084 tmp_reg = alloc_preg (cfg);
3085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3088 tmp_reg = alloc_preg (cfg);
3089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3091 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3093 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3094 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
/* Ordinary object: plain hierarchy test. */
3096 MONO_START_BB (cfg, no_proxy_bb);
3098 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Join blocks load the encoded result (1, 2 or 0) into dreg. */
3101 MONO_START_BB (cfg, false_bb);
3103 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3104 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3106 MONO_START_BB (cfg, false2_bb);
3108 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3111 MONO_START_BB (cfg, true_bb);
3113 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3115 MONO_START_BB (cfg, end_bb);
3118 MONO_INST_NEW (cfg, ins, OP_ICONST);
3120 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Remoting-aware variant of castclass used by wrapper code.  Returns an
 * integer result (see encoding below) instead of throwing for the
 * undecidable-proxy case, so the caller can complete the check remotely.
 */
3126 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3128 /* This opcode takes as input an object reference and a class, and returns:
3129 0) if the object is an instance of the class,
3130 1) if the object is a proxy whose type cannot be determined
3131 an InvalidCastException exception is thrown otherwhise*/
3134 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3135 int obj_reg = src->dreg;
3136 int dreg = alloc_ireg (cfg);
3137 int tmp_reg = alloc_preg (cfg);
3138 int klass_reg = alloc_preg (cfg);
3140 NEW_BBLOCK (cfg, end_bb);
3141 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes the cast (result 0). */
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3146 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3147 NEW_BBLOCK (cfg, interface_fail_bb);
3149 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3150 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type info
 * survives (result 1); anything else throws InvalidCastException. */
3151 MONO_START_BB (cfg, interface_fail_bb);
3152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3154 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3156 tmp_reg = alloc_preg (cfg);
3157 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3159 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3161 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: route proxies through their remote proxy_class. */
3165 NEW_BBLOCK (cfg, no_proxy_bb);
3167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3169 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3171 tmp_reg = alloc_preg (cfg);
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3173 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3175 tmp_reg = alloc_preg (cfg);
3176 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info that does not match locally: result 1. */
3180 NEW_BBLOCK (cfg, fail_1_bb);
3182 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3184 MONO_START_BB (cfg, fail_1_bb);
3186 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3187 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: a failing castclass here throws. */
3189 MONO_START_BB (cfg, no_proxy_bb);
3191 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3194 MONO_START_BB (cfg, ok_result_bb);
3196 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3198 MONO_START_BB (cfg, end_bb);
3201 MONO_INST_NEW (cfg, ins, OP_ICONST);
3203 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor: allocate the delegate object of
 * @klass and fill in its target, method, (JIT-only) method_code slot and
 * invoke_impl trampoline.  Remaining validity checks are deferred to the
 * delegate trampoline itself.
 */
3208 static G_GNUC_UNUSED MonoInst*
3209 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3211 gpointer *trampoline;
3212 MonoInst *obj, *method_ins, *tramp_ins;
3216 obj = handle_alloc (cfg, klass, FALSE);
3218 /* Inline the contents of mono_delegate_ctor */
3220 /* Set target field */
3221 /* Optimize away setting of NULL target */
3222 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3223 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3225 /* Set method field */
3226 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3227 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3230 * To avoid looking up the compiled code belonging to the target method
3231 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3232 * store it, and we fill it after the method has been compiled.
/* JIT-only optimization: not valid for AOT (no runtime slot) or dynamic methods. */
3234 if (!cfg->compile_aot && !method->dynamic) {
3235 MonoInst *code_slot_ins;
3237 domain = mono_domain_get ();
/* Lazily create and populate the per-domain method -> code-slot table under the domain lock. */
3238 mono_domain_lock (domain);
3239 if (!domain_jit_info (domain)->method_code_hash)
3240 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3241 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3243 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3244 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3246 mono_domain_unlock (domain);
3248 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3249 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3252 /* Set invoke_impl field */
3253 trampoline = mono_create_delegate_trampoline (klass);
3254 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3255 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3257 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit the allocation of a multi-dimensional array (IL `newobj' on an
 * array ctor) by calling the vararg array-new icall for @rank with the
 * dimension arguments in @sp.
 */
3263 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3265 MonoJitICallInfo *info;
3267 /* Need to register the icall so it gets an icall wrapper */
3268 info = mono_get_array_new_va_icall (rank);
/* The icall is vararg, so the method must be flagged accordingly. */
3270 cfg->flags |= MONO_CFG_HAS_VARARGS;
3272 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3273 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * If the method uses a GOT variable that has not been materialized yet,
 * insert an OP_LOAD_GOTADDR at the very start of the entry basic block
 * and a dummy use in the exit block so liveness keeps the variable alive
 * for the whole method.
 */
3277 mono_emit_load_got_addr (MonoCompile *cfg)
3279 MonoInst *getaddr, *dummy_use;
3281 if (!cfg->got_var || cfg->got_var_allocated)
3284 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3285 getaddr->dreg = cfg->got_var->dreg;
3287 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; MONO_ADD_INS appends. */
3288 if (cfg->bb_entry->code) {
3289 getaddr->next = cfg->bb_entry->code;
3290 cfg->bb_entry->code = getaddr;
3293 MONO_ADD_INS (cfg->bb_entry, getaddr);
3295 cfg->got_var_allocated = TRUE;
3298 * Add a dummy use to keep the got_var alive, since real uses might
3299 * only be generated by the back ends.
3300 * Add it to end_bblock, so the variable's lifetime covers the whole
3302 * It would be better to make the usage of the got var explicit in all
3303 * cases when the backend needs it (i.e. calls, throw etc.), so this
3304 * wouldn't be needed.
3306 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3307 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 * Decide whether @method may be inlined into the method being compiled.
 * Rejects: shared-generics compilation, runtime/icall/noinline/
 * synchronized/pinvoke methods, MarshalByRef classes, methods with
 * exception clauses, bodies over the size limit (MONO_INLINELIMIT env var
 * or INLINE_LENGTH_LIMIT), classes whose cctor would have to run inside
 * the inlined code, methods with declarative security, and (on soft-float
 * targets) any R4 in the signature.
 */
3311 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3313 MonoMethodHeader *header = mono_method_get_header (method);
3315 #ifdef MONO_ARCH_SOFT_FLOAT
3316 MonoMethodSignature *sig = mono_method_signature (method);
3320 if (cfg->generic_sharing_context)
3323 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke stubs can still be inlined. */
3324 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3325 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3326 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3330 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3331 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3332 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3333 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3334 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3335 (method->klass->marshalbyref) ||
3336 !header || header->num_clauses)
3339 /* also consider num_locals? */
3340 /* Do the size check early to avoid creating vtables */
3341 if (getenv ("MONO_INLINELIMIT")) {
3342 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3345 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3349 * if we can initialize the class of the method right away, we do,
3350 * otherwise we don't allow inlining if the class needs initialization,
3351 * since it would mean inserting a call to mono_runtime_class_init()
3352 * inside the inlined code
3354 if (!(cfg->opt & MONO_OPT_SHARED)) {
3355 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3356 if (cfg->run_cctors && method->klass->has_cctor) {
3357 if (!method->klass->runtime_info)
3358 /* No vtable created yet */
3360 vtable = mono_class_vtable (cfg->domain, method->klass);
3363 /* This makes so that inline cannot trigger */
3364 /* .cctors: too many apps depend on them */
3365 /* running with a specific order... */
3366 if (! vtable->initialized)
3368 mono_runtime_class_init (vtable);
3370 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3371 if (!method->klass->runtime_info)
3372 /* No vtable created yet */
3374 vtable = mono_class_vtable (cfg->domain, method->klass);
3377 if (!vtable->initialized)
3382 * If we're compiling for shared code
3383 * the cctor will need to be run at aot method load time, for example,
3384 * or at the end of the compilation of the inlining method.
3386 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3391 * CAS - do not inline methods with declarative security
3392 * Note: this has to be before any possible return TRUE;
3394 if (mono_method_has_declsec (method))
3397 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot inline methods that pass or return R4 values. */
3399 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3401 for (i = 0; i < sig->param_count; ++i)
3402 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access compiled into METHOD must be
 * preceded by a run of VTABLE->klass's class constructor.
 * NOTE(review): this listing is elided — the return statements between the
 * guard conditions below are not visible here; confirm against full source.
 */
3410 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already-initialized vtable and not AOT: nothing more to do at access time. */
3412 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes permit lazy initialization (ECMA-335 semantics). */
3415 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3418 if (!mono_class_needs_cctor_run (vtable->klass, method))
/* Instance method of the same class: the cctor ran before the call. */
3421 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3422 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS. Emits a bounds check against
 * MonoArray.max_length, then either an x86/amd64 LEA fast path (element
 * sizes 1/2/4/8) or a generic multiply-and-add sequence.
 * NOTE(review): listing elided — #else/#endif lines and the final return
 * of `ins` are not visible here.
 */
3429 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3433 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3435 mono_class_init (klass);
3436 size = mono_class_array_element_size (klass);
3438 mult_reg = alloc_preg (cfg);
3439 array_reg = arr->dreg;
3440 index_reg = index->dreg;
3442 #if SIZEOF_VOID_P == 8
3443 /* The array reg is 64 bits but the index reg is only 32 */
3444 index2_reg = alloc_preg (cfg);
3445 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3447 index2_reg = index_reg;
3450 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3452 #if defined(__i386__) || defined(__x86_64__)
/* Fast path: fold the element-size shift into an LEA addressing mode. */
3453 if (size == 1 || size == 2 || size == 4 || size == 8) {
3454 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3456 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3457 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector). */
3463 add_reg = alloc_preg (cfg);
3465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3466 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3467 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3468 ins->type = STACK_PTR;
3469 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array with per-dimension lower bounds. Both indexes are rebased
 * against MonoArrayBounds.lower_bound and range-checked against
 * MonoArrayBounds.length before the flattened offset is computed.
 * Only compiled when the architecture does not emulate mul/div (the guard
 * above), since it relies on native pointer multiplication.
 */
3476 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3478 int bounds_reg = alloc_preg (cfg);
3479 int add_reg = alloc_preg (cfg);
3480 int mult_reg = alloc_preg (cfg);
3481 int mult2_reg = alloc_preg (cfg);
3482 int low1_reg = alloc_preg (cfg);
3483 int low2_reg = alloc_preg (cfg);
3484 int high1_reg = alloc_preg (cfg);
3485 int high2_reg = alloc_preg (cfg);
3486 int realidx1_reg = alloc_preg (cfg);
3487 int realidx2_reg = alloc_preg (cfg);
3488 int sum_reg = alloc_preg (cfg);
3493 mono_class_init (klass);
3494 size = mono_class_array_element_size (klass);
3496 index1 = index_ins1->dreg;
3497 index2 = index_ins2->dreg;
3499 /* range checking */
3500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3501 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound[0]; check < length[0]. */
3503 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3504 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3505 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3507 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3508 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3509 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds[1] lives sizeof(MonoArrayBounds) past bounds[0]. */
3511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3512 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3513 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3514 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3515 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3516 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3517 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length[1] + realidx2) * size + vector offset. */
3519 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3520 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3522 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3523 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3525 ins->type = STACK_MP;
3527 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch element-address computation for an Array Address/Get/Set
 * intrinsic CMETHOD: rank 1 and (optionally) rank 2 get inline fast paths,
 * anything else falls back to a call to the marshalled Address wrapper.
 * IS_SET adjusts the rank because Set methods carry the value as the last
 * parameter.
 */
3534 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3538 MonoMethod *addr_method;
3541 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3544 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3546 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3547 /* emit_ldelema_2 depends on OP_LMUL */
3548 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3549 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper for this rank/size. */
3553 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3554 addr_method = mono_marshal_get_array_address (rank, element_size);
3555 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Try to replace a call to CMETHOD (with signature FSIG and already-emitted
 * argument instructions ARGS) by an inline IR intrinsic: String get_Chars/
 * get_Length/InternalSetChar, Object GetType/hashing/.ctor, Array get_Rank/
 * get_Length, RuntimeHelpers.get_OffsetToStringData, Thread intrinsics,
 * Interlocked operations, Debugger.Break, Environment.get_IsRunningOnWindows,
 * and SIMD intrinsics. Returns the emitted instruction, or falls through to
 * the arch-specific hook when no portable intrinsic applies.
 */
3561 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3563 MonoInst *ins = NULL;
3565 static MonoClass *runtime_helpers_class = NULL;
3566 if (! runtime_helpers_class)
3567 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3568 "System.Runtime.CompilerServices", "RuntimeHelpers");
3570 if (cmethod->klass == mono_defaults.string_class) {
3571 if (strcmp (cmethod->name, "get_Chars") == 0) {
3572 int dreg = alloc_ireg (cfg);
3573 int index_reg = alloc_preg (cfg);
3574 int mult_reg = alloc_preg (cfg);
3575 int add_reg = alloc_preg (cfg);
3577 #if SIZEOF_VOID_P == 8
3578 /* The array reg is 64 bits but the index reg is only 32 */
3579 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3581 index_reg = args [1]->dreg;
3583 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3585 #if defined(__i386__) || defined(__x86_64__)
3586 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3587 add_reg = ins->dreg;
3588 /* Avoid a warning */
3590 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3594 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3595 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3596 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3598 type_from_op (ins, NULL, NULL);
3600 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3601 int dreg = alloc_ireg (cfg);
3602 /* Decompose later to allow more optimizations */
3603 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3604 ins->type = STACK_I4;
3605 cfg->cbb->has_array_access = TRUE;
3606 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3609 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3610 int mult_reg = alloc_preg (cfg);
3611 int add_reg = alloc_preg (cfg);
3613 /* The corlib functions check for oob already. */
3614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3615 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3616 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3619 } else if (cmethod->klass == mono_defaults.object_class) {
3621 if (strcmp (cmethod->name, "GetType") == 0) {
3622 int dreg = alloc_preg (cfg);
3623 int vt_reg = alloc_preg (cfg);
3624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3625 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3626 type_from_op (ins, NULL, NULL);
3629 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3630 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3631 int dreg = alloc_ireg (cfg);
3632 int t1 = alloc_ireg (cfg);
3634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3635 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3636 ins->type = STACK_I4;
3640 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3641 MONO_INST_NEW (cfg, ins, OP_NOP);
3642 MONO_ADD_INS (cfg->cbb, ins);
3646 } else if (cmethod->klass == mono_defaults.array_class) {
3647 if (cmethod->name [0] != 'g')
3650 if (strcmp (cmethod->name, "get_Rank") == 0) {
3651 int dreg = alloc_ireg (cfg);
3652 int vtable_reg = alloc_preg (cfg);
3653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3654 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3655 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3656 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3657 type_from_op (ins, NULL, NULL);
3660 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3661 int dreg = alloc_ireg (cfg);
3663 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3664 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3665 type_from_op (ins, NULL, NULL);
3670 } else if (cmethod->klass == runtime_helpers_class) {
3672 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3673 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3677 } else if (cmethod->klass == mono_defaults.thread_class) {
3678 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3679 ins->dreg = alloc_preg (cfg);
3680 ins->type = STACK_OBJ;
3681 MONO_ADD_INS (cfg->cbb, ins);
3683 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3684 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3685 MONO_ADD_INS (cfg->cbb, ins);
3687 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3688 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3689 MONO_ADD_INS (cfg->cbb, ins);
3692 } else if (mini_class_is_system_array (cmethod->klass) &&
3693 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3694 MonoInst *addr, *store, *load;
3695 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3697 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3698 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3699 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3701 } else if (cmethod->klass->image == mono_defaults.corlib &&
3702 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3703 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3706 #if SIZEOF_VOID_P == 8
3707 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3708 /* 64 bit reads are already atomic */
3709 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3710 ins->dreg = mono_alloc_preg (cfg);
3711 ins->inst_basereg = args [0]->dreg;
3712 ins->inst_offset = 0;
3713 MONO_ADD_INS (cfg->cbb, ins);
3717 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3718 if (strcmp (cmethod->name, "Increment") == 0) {
3719 MonoInst *ins_iconst;
3722 if (fsig->params [0]->type == MONO_TYPE_I4)
3723 opcode = OP_ATOMIC_ADD_NEW_I4;
3724 #if SIZEOF_VOID_P == 8
3725 else if (fsig->params [0]->type == MONO_TYPE_I8)
3726 opcode = OP_ATOMIC_ADD_NEW_I8;
3729 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3730 ins_iconst->inst_c0 = 1;
3731 ins_iconst->dreg = mono_alloc_ireg (cfg);
3732 MONO_ADD_INS (cfg->cbb, ins_iconst);
3734 MONO_INST_NEW (cfg, ins, opcode);
3735 ins->dreg = mono_alloc_ireg (cfg);
3736 ins->inst_basereg = args [0]->dreg;
3737 ins->inst_offset = 0;
3738 ins->sreg2 = ins_iconst->dreg;
3739 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3740 MONO_ADD_INS (cfg->cbb, ins);
3742 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3743 MonoInst *ins_iconst;
3746 if (fsig->params [0]->type == MONO_TYPE_I4)
3747 opcode = OP_ATOMIC_ADD_NEW_I4;
3748 #if SIZEOF_VOID_P == 8
3749 else if (fsig->params [0]->type == MONO_TYPE_I8)
3750 opcode = OP_ATOMIC_ADD_NEW_I8;
3753 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3754 ins_iconst->inst_c0 = -1;
3755 ins_iconst->dreg = mono_alloc_ireg (cfg);
3756 MONO_ADD_INS (cfg->cbb, ins_iconst);
3758 MONO_INST_NEW (cfg, ins, opcode);
3759 ins->dreg = mono_alloc_ireg (cfg);
3760 ins->inst_basereg = args [0]->dreg;
3761 ins->inst_offset = 0;
3762 ins->sreg2 = ins_iconst->dreg;
3763 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3764 MONO_ADD_INS (cfg->cbb, ins);
3766 } else if (strcmp (cmethod->name, "Add") == 0) {
3769 if (fsig->params [0]->type == MONO_TYPE_I4)
3770 opcode = OP_ATOMIC_ADD_NEW_I4;
3771 #if SIZEOF_VOID_P == 8
3772 else if (fsig->params [0]->type == MONO_TYPE_I8)
3773 opcode = OP_ATOMIC_ADD_NEW_I8;
3777 MONO_INST_NEW (cfg, ins, opcode);
3778 ins->dreg = mono_alloc_ireg (cfg);
3779 ins->inst_basereg = args [0]->dreg;
3780 ins->inst_offset = 0;
3781 ins->sreg2 = args [1]->dreg;
/* FIX: compare against OP_ATOMIC_ADD_NEW_I4 — `opcode` is only ever set to
 * the _NEW_ variants above, so comparing with OP_ATOMIC_ADD_I4 was always
 * false and mis-typed the I4 result as STACK_I8 (cf. Increment/Decrement). */
3782 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3783 MONO_ADD_INS (cfg->cbb, ins);
3786 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3788 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3789 if (strcmp (cmethod->name, "Exchange") == 0) {
3792 if (fsig->params [0]->type == MONO_TYPE_I4)
3793 opcode = OP_ATOMIC_EXCHANGE_I4;
3794 #if SIZEOF_VOID_P == 8
3795 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3796 (fsig->params [0]->type == MONO_TYPE_I) ||
3797 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3798 opcode = OP_ATOMIC_EXCHANGE_I8;
3800 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3801 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3802 opcode = OP_ATOMIC_EXCHANGE_I4;
3807 MONO_INST_NEW (cfg, ins, opcode);
3808 ins->dreg = mono_alloc_ireg (cfg);
3809 ins->inst_basereg = args [0]->dreg;
3810 ins->inst_offset = 0;
3811 ins->sreg2 = args [1]->dreg;
3812 MONO_ADD_INS (cfg->cbb, ins);
3814 switch (fsig->params [0]->type) {
3816 ins->type = STACK_I4;
3820 ins->type = STACK_I8;
3822 case MONO_TYPE_OBJECT:
3823 ins->type = STACK_OBJ;
3826 g_assert_not_reached ();
3829 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3831 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3833 * Can't implement CompareExchange methods this way since they have
3834 * three arguments. We can implement one of the common cases, where the new
3835 * value is a constant.
3837 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3838 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3839 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3840 ins->dreg = alloc_ireg (cfg);
3841 ins->sreg1 = args [0]->dreg;
3842 ins->sreg2 = args [1]->dreg;
3843 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3844 ins->type = STACK_I4;
3845 MONO_ADD_INS (cfg->cbb, ins);
3847 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3849 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3853 } else if (cmethod->klass->image == mono_defaults.corlib) {
3854 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3855 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3856 MONO_INST_NEW (cfg, ins, OP_BREAK);
3857 MONO_ADD_INS (cfg->cbb, ins);
3860 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3861 && strcmp (cmethod->klass->name, "Environment") == 0) {
3862 #ifdef PLATFORM_WIN32
3863 EMIT_NEW_ICONST (cfg, ins, 1);
3865 EMIT_NEW_ICONST (cfg, ins, 0);
3869 } else if (cmethod->klass == mono_defaults.math_class) {
3871 * There is general branches code for Min/Max, but it does not work for
3873 * http://everything2.com/?node_id=1051618
3877 #ifdef MONO_ARCH_SIMD_INTRINSICS
3878 if (cfg->opt & MONO_OPT_SIMD) {
3879 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No portable intrinsic matched: let the backend try. */
3885 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 * Redirect selected internal calls to managed implementations; currently
 * only String.InternalAllocateStr, which is rerouted to the GC's managed
 * string allocator when one is available. Returns the emitted call
 * instruction, or (not visible in this elided listing) NULL when no
 * redirection applies.
 * This entry point could be used later for arbitrary method
 */
3892 inline static MonoInst*
3893 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3894 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3896 if (method->klass == mono_defaults.string_class) {
3897 /* managed string allocation support */
3898 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3899 MonoInst *iargs [2];
3900 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3901 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (vtable, length) instead of the icall. */
3904 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3905 iargs [1] = args [0];
3906 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * During inlining, create an OP_LOCAL variable for each incoming argument
 * (including `this` when SIG->hasthis) and emit stores of the caller's
 * stack values SP into them, wiring them up as cfg->args for the inlined
 * body.
 */
3913 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3915 MonoInst *store, *temp;
3918 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is `this` when present; its type comes from the stack value. */
3919 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3922 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3923 * would be different than the MonoInst's used to represent arguments, and
3924 * the ldelema implementation can't deal with that.
3925 * Solution: When ldelema is used on an inline argument, create a var for
3926 * it, emit ldelema on that var, and emit the saving code below in
3927 * inline_method () if needed.
3929 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3930 cfg->args [i] = temp;
3931 /* This uses cfg->args [i] which is set by the preceding line */
3932 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3933 store->cil_code = sp [0]->cil_code;
/* Debug knobs: restrict inlining to methods whose full name matches the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables. */
3938 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3939 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3941 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Returns whether CALLED_METHOD's full name starts with the prefix given in
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env lookup is cached in a
 * static; an empty/unset limit disables the filter (the TRUE return for
 * that path is not visible in this elided listing).
 */
3943 check_inline_called_method_name_limit (MonoMethod *called_method)
3946 static char *limit = NULL;
3948 if (limit == NULL) {
3949 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3951 if (limit_string != NULL)
3952 limit = limit_string;
3954 limit = (char *) "";
3957 if (limit [0] != '\0') {
3958 char *called_method_name = mono_method_full_name (called_method, TRUE);
3960 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3961 g_free (called_method_name);
3963 //return (strncmp_result <= 0);
3964 return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Caller-side counterpart of check_inline_called_method_name_limit: match
 * CALLER_METHOD's full name against MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
3973 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3976 static char *limit = NULL;
3978 if (limit == NULL) {
3979 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3980 if (limit_string != NULL) {
3981 limit = limit_string;
3983 limit = (char *) "";
3987 if (limit [0] != '\0') {
3988 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3990 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3991 g_free (caller_method_name);
3993 //return (strncmp_result <= 0);
3994 return (strncmp_result == 0);
/*
 * inline_method:
 * Inline CMETHOD (signature FSIG, arguments SP) into the current method at
 * IP. Saves the pieces of CFG state that mono_method_to_ir2 overwrites,
 * compiles the callee's IL into freshly created start/end bblocks, then
 * either splices the result into the caller (cost below threshold or
 * INLINE_ALLWAYS) or throws the new bblocks away. Returns the inlining
 * cost on success (exact return lines are elided in this listing).
 */
4002 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4003 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4005 MonoInst *ins, *rvar = NULL;
4006 MonoMethodHeader *cheader;
4007 MonoBasicBlock *ebblock, *sbblock;
4009 MonoMethod *prev_inlined_method;
4010 MonoInst **prev_locals, **prev_args;
4011 MonoType **prev_arg_types;
4012 guint prev_real_offset;
4013 GHashTable *prev_cbb_hash;
4014 MonoBasicBlock **prev_cil_offset_to_bb;
4015 MonoBasicBlock *prev_cbb;
4016 unsigned char* prev_cil_start;
4017 guint32 prev_cil_offset_to_bb_len;
4018 MonoMethod *prev_current_method;
4019 MonoGenericContext *prev_generic_context;
4021 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4023 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4024 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4027 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4028 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4032 if (cfg->verbose_level > 2)
4033 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4035 if (!cmethod->inline_info) {
4036 mono_jit_stats.inlineable_methods++;
4037 cmethod->inline_info = 1;
4039 /* allocate space to store the return value */
4040 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4041 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4044 /* allocate local variables */
4045 cheader = mono_method_get_header (cmethod);
4046 prev_locals = cfg->locals;
4047 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4048 for (i = 0; i < cheader->num_locals; ++i)
4049 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4051 /* allocate start and end blocks */
4052 /* This is needed so if the inline is aborted, we can clean up */
4053 NEW_BBLOCK (cfg, sbblock);
4054 sbblock->real_offset = real_offset;
4056 NEW_BBLOCK (cfg, ebblock);
4057 ebblock->block_num = cfg->num_bblocks++;
4058 ebblock->real_offset = real_offset;
/* Save the caller's compilation state before recursing into the callee. */
4060 prev_args = cfg->args;
4061 prev_arg_types = cfg->arg_types;
4062 prev_inlined_method = cfg->inlined_method;
4063 cfg->inlined_method = cmethod;
4064 cfg->ret_var_set = FALSE;
4065 prev_real_offset = cfg->real_offset;
4066 prev_cbb_hash = cfg->cbb_hash;
4067 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4068 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4069 prev_cil_start = cfg->cil_start;
4070 prev_cbb = cfg->cbb;
4071 prev_current_method = cfg->current_method;
4072 prev_generic_context = cfg->generic_context;
4074 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
/* Restore the caller's state regardless of success. */
4076 cfg->inlined_method = prev_inlined_method;
4077 cfg->real_offset = prev_real_offset;
4078 cfg->cbb_hash = prev_cbb_hash;
4079 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4080 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4081 cfg->cil_start = prev_cil_start;
4082 cfg->locals = prev_locals;
4083 cfg->args = prev_args;
4084 cfg->arg_types = prev_arg_types;
4085 cfg->current_method = prev_current_method;
4086 cfg->generic_context = prev_generic_context;
4088 if ((costs >= 0 && costs < 60) || inline_allways) {
4089 if (cfg->verbose_level > 2)
4090 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4092 mono_jit_stats.inlined_methods++;
4094 /* always add some code to avoid block split failures */
4095 MONO_INST_NEW (cfg, ins, OP_NOP);
4096 MONO_ADD_INS (prev_cbb, ins);
4098 prev_cbb->next_bb = sbblock;
4099 link_bblock (cfg, prev_cbb, sbblock);
4102 * Get rid of the begin and end bblocks if possible to aid local
4105 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4107 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4108 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4110 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4111 MonoBasicBlock *prev = ebblock->in_bb [0];
4112 mono_merge_basic_blocks (cfg, prev, ebblock);
4120 * If the inlined method contains only a throw, then the ret var is not
4121 * set, so set it to a dummy value.
4123 if (!cfg->ret_var_set) {
4124 static double r8_0 = 0.0;
4126 switch (rvar->type) {
4128 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4131 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4136 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4139 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4140 ins->type = STACK_R8;
4141 ins->inst_p0 = (void*)&r8_0;
4142 ins->dreg = rvar->dreg;
4143 MONO_ADD_INS (cfg->cbb, ins);
4146 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4149 g_assert_not_reached ();
4153 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Inline aborted: clear the failure state and drop the new bblocks. */
4158 if (cfg->verbose_level > 2)
4159 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4160 cfg->exception_type = MONO_EXCEPTION_NONE;
4161 mono_loader_clear_error ();
4163 /* This gets rid of the newly added bblocks */
4164 cfg->cbb = prev_cbb;
4170 * Some of these comments may well be out-of-date.
4171 * Design decisions: we do a single pass over the IL code (and we do bblock
4172 * splitting/merging in the few cases when it's required: a back jump to an IL
4173 * address that was not already seen as bblock starting point).
4174 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4175 * Complex operations are decomposed in simpler ones right away. We need to let the
4176 * arch-specific code peek and poke inside this process somehow (except when the
4177 * optimizations can take advantage of the full semantic info of coarse opcodes).
4178 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4179 * MonoInst->opcode initially is the IL opcode or some simplification of that
4180 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4181 * opcode with value bigger than OP_LAST.
4182 * At this point the IR can be handed over to an interpreter, a dumb code generator
4183 * or to the optimizing code generator that will translate it to SSA form.
4185 * Profiling directed optimizations.
4186 * We may compile by default with few or no optimizations and instrument the code
4187 * or the user may indicate what methods to optimize the most either in a config file
4188 * or through repeated runs where the compiler applies offline the optimizations to
4189 * each method and then decides if it was worth it.
/* IL-decode sanity/verification helper macros. UNVERIFIED presumably jumps
 * to the unverified-code handler inside the main IL loop — confirm in the
 * full source; the handler is not visible in this listing. */
4192 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4193 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4194 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4195 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4196 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4197 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4198 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4199 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4201 /* offset from br.s -> br like opcodes */
4202 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Returns whether the IL address IP still belongs to bblock BB, i.e. no
 * other bblock has been registered as starting at that offset.
 */
4205 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4207 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4209 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL in [START, END): walk every opcode, and for each
 * branch/switch target (and each fall-through after a branch) register a
 * bblock start via GET_BBLOCK. Bblocks that contain a throw are marked
 * out_of_line so later passes can move them off the hot path.
 * NOTE(review): listing elided — the ip-advance statements per argument
 * kind and the error paths are not visible here.
 */
4213 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4215 unsigned char *ip = start;
4216 unsigned char *target;
4219 MonoBasicBlock *bblock;
4220 const MonoOpcode *opcode;
4223 cli_addr = ip - start;
4224 i = mono_opcode_value ((const guint8 **)&ip, end);
4227 opcode = &mono_opcodes [i];
/* Advance ip according to the operand encoding of each opcode. */
4228 switch (opcode->argument) {
4229 case MonoInlineNone:
4232 case MonoInlineString:
4233 case MonoInlineType:
4234 case MonoInlineField:
4235 case MonoInlineMethod:
4238 case MonoShortInlineR:
4245 case MonoShortInlineVar:
4246 case MonoShortInlineI:
4249 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement from the next instruction. */
4250 target = start + cli_addr + 2 + (signed char)ip [1];
4251 GET_BBLOCK (cfg, bblock, target);
4254 GET_BBLOCK (cfg, bblock, ip);
4256 case MonoInlineBrTarget:
4257 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4258 GET_BBLOCK (cfg, bblock, target);
4261 GET_BBLOCK (cfg, bblock, ip);
4263 case MonoInlineSwitch: {
4264 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch opcode. */
4267 cli_addr += 5 + 4 * n;
4268 target = start + cli_addr;
4269 GET_BBLOCK (cfg, bblock, target);
4271 for (j = 0; j < n; ++j) {
4272 target = start + cli_addr + (gint32)read32 (ip);
4273 GET_BBLOCK (cfg, bblock, target);
4283 g_assert_not_reached ();
4286 if (i == CEE_THROW) {
4287 unsigned char *bb_start = ip - 1;
4289 /* Find the start of the bblock containing the throw */
4291 while ((bb_start >= start) && !bblock) {
4292 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4296 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN in the context of method M. Wrapper methods store their
 * targets as wrapper data instead of metadata tokens; otherwise resolve
 * through the image, allowing open constructed generic methods.
 */
static inline MonoMethod *
4306 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4310 if (m->wrapper_type != MONO_WRAPPER_NONE)
4311 return mono_method_get_wrapper_data (m, token);
4313 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, reject methods on open constructed types (the elided line
 * presumably nulls/flags the result — confirm in full source).
 */
static inline MonoMethod *
4319 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4321 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4323 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve a class TOKEN in the context of METHOD: wrapper methods keep the
 * class as wrapper data, normal methods go through metadata with the given
 * generic CONTEXT. The class is initialized before being returned.
 */
static inline MonoClass*
4330 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4334 if (method->wrapper_type != MONO_WRAPPER_NONE)
4335 klass = mono_method_get_wrapper_data (method, token);
4337 klass = mono_class_get_full (method->klass->image, token, context);
4339 mono_class_init (klass);
/*
 * check_linkdemand:
 * Evaluate CAS link demands for a CALLER -> CALLEE call site. For an ECMA
 * link demand, emit code that throws a SecurityException at the call site;
 * for other failures, record a security-linkdemand exception on CFG
 * (without clobbering an earlier failure).
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
4348 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining into a different method than cfg->method. */
4352 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4356 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4357 if (result == MONO_JIT_SECURITY_OK)
4360 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4361 /* Generate code to throw a SecurityException before the actual call/link */
4362 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4365 NEW_ICONST (cfg, args [0], 4);
4366 NEW_METHODCONST (cfg, args [1], caller);
4367 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4368 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4369 /* don't hide previous results */
4370 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4371 cfg->exception_data = result;
/*
 * method_access_exception:
 * Lazily resolve and cache SecurityManager.MethodAccessException(2 args),
 * the managed thrower used for CoreCLR method-access violations.
 */
4379 method_access_exception (void)
4381 static MonoMethod *method = NULL;
4384 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4385 method = mono_class_get_method_from_name (secman->securitymanager,
4386 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit a call to the managed MethodAccessException thrower with CALLER and
 * CALLEE as arguments, at the current emission point.
 */
4393 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4394 MonoBasicBlock *bblock, unsigned char *ip)
4396 MonoMethod *thrower = method_access_exception ();
4399 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4400 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4401 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 * Lazily resolve and cache SecurityManager.VerificationException(0 args),
 * the managed thrower for verification failures.
 */
4405 verification_exception (void)
4407 static MonoMethod *method = NULL;
4410 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4411 method = mono_class_get_method_from_name (secman->securitymanager,
4412 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 * Emit a call to the managed VerificationException thrower at the current
 * emission point.
 */
4419 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4421 MonoMethod *thrower = verification_exception ();
4423 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: a call is allowed when the caller's transparency level
 * is at least the callee's, or either side is SafeCritical. Otherwise emit
 * a MethodAccessException throw at the call site.
 */
4427 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4428 MonoBasicBlock *bblock, unsigned char *ip)
4430 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4431 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4432 gboolean is_safe = TRUE;
4434 if (!(caller_level >= callee_level ||
4435 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4436 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4441 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 * Test hook: flags methods named "unsafeMethod" (presumably returning FALSE
 * for them — the return lines are elided in this listing).
 */
4445 method_is_safe (MonoMethod *method)
4448 if (strcmp (method->name, "unsafeMethod") == 0)
/*
 * initialize_array_data:
 * Pattern-match the `dup; ldtoken <field>; call RuntimeHelpers.InitializeArray`
 * sequence at IP following a newarr of KLASS with LEN elements. On a match,
 * return a pointer to the field's static RVA data (or the RVA itself for
 * AOT) and store the byte size in OUT_SIZE; big-endian and unsupported
 * element types punt. Elided lines include the size-per-element cases and
 * several early returns.
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
4459 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4462 * newarr[System.Int32]
4464 * ldtoken field valuetype ...
4465 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4467 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4468 guint32 token = read32 (ip + 7);
4469 guint32 field_token = read32 (ip + 2);
4470 guint32 field_index = field_token & 0xffffff;
4472 const char *data_ptr;
4474 MonoMethod *cmethod;
4475 MonoClass *dummy_class;
4476 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4482 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the callee really is corlib's RuntimeHelpers.InitializeArray. */
4485 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4487 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4488 case MONO_TYPE_BOOLEAN:
4492 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4493 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4494 case MONO_TYPE_CHAR:
4504 return NULL; /* stupid ARM FP swapped format */
4514 if (size > mono_type_size (field->type, &dummy_align))
4517 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4518 if (!method->klass->image->dynamic) {
4519 field_index = read32 (ip + 2) & 0xffffff;
4520 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4521 data_ptr = mono_image_rva_map (method->klass->image, rva);
4522 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4523 /* for aot code we do the lookup on load */
4524 if (aot && data_ptr)
4525 return GUINT_TO_POINTER (rva);
4527 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images keep the data directly on the field. */
4529 data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the MonoCompile.  The message
 * names the offending method and either notes an empty method body or
 * disassembles the IL instruction at @ip.
 */
4537 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4539 char *method_fname = mono_method_full_name (method, TRUE);
4542 if (mono_method_get_header (method)->code_size == 0)
4543 method_code = g_strdup ("method body is empty.");
4545 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4546 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4547 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* exception_message owns its own copy; free the temporaries. */
4548 g_free (method_fname);
4549 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-built managed exception object on the MonoCompile.
 * cfg->exception_ptr is registered as a GC root first so the object is
 * kept alive until compilation reports the failure.
 */
4553 set_exception_object (MonoCompile *cfg, MonoException *exception)
4555 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4556 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4557 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Whether @klass is a reference type.  Under generic sharing, type
 * variables are first resolved to their basic type through the
 * sharing context, so a shared "T" is classified by its constraint
 * rather than left unresolved.
 */
4561 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4565 if (cfg->generic_sharing_context)
4566 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4568 type = &klass->byval_arg;
4569 return MONO_TYPE_IS_REFERENCE (type);
4573 * mono_decompose_array_access_opts:
4575 * Decompose array access opcodes.
4576 * This should be in decompose.c, but it emits calls so it has to stay here until
4577 * the old JIT is gone.
4580 mono_decompose_array_access_opts (MonoCompile *cfg)
4582 MonoBasicBlock *bb, *first_bb;
4585 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4586 * can be executed anytime. It should be run before decompose_long
4590 * Create a dummy bblock and emit code into it so we can use the normal
4591 * code generation macros.
4593 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4594 first_bb = cfg->cbb;
4596 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4598 MonoInst *prev = NULL;
4600 MonoInst *iargs [3];
/* Skip bblocks that were never marked as containing array accesses. */
4603 if (!bb->has_array_access)
4606 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4608 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4614 for (ins = bb->code; ins; ins = ins->next) {
4615 switch (ins->opcode) {
/* Array length: plain i4 load from MonoArray::max_length. */
4617 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4618 G_STRUCT_OFFSET (MonoArray, max_length));
4619 MONO_ADD_INS (cfg->cbb, dest);
4621 case OP_BOUNDS_CHECK:
/* Delegate to the arch-specific bounds-check expansion. */
4622 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* New array: with MONO_OPT_SHARED go through the generic icall
 * (domain + element class + length) ... */
4625 if (cfg->opt & MONO_OPT_SHARED) {
4626 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4627 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4628 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4629 iargs [2]->dreg = ins->sreg1;
4631 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4632 dest->dreg = ins->dreg;
/* ... otherwise use the faster vtable-specific icall. */
4634 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4637 NEW_VTABLECONST (cfg, iargs [0], vtable);
4638 MONO_ADD_INS (cfg->cbb, iargs [0]);
4639 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4640 iargs [1]->dreg = ins->sreg1;
4642 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4643 dest->dreg = ins->dreg;
/* String length: i4 load from MonoString::length. */
4647 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4648 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4649 MONO_ADD_INS (cfg->cbb, dest);
4655 g_assert (cfg->cbb == first_bb);
4657 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4658 /* Replace the original instruction with the new code sequence */
4660 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock so it can be reused for the next ins. */
4661 first_bb->code = first_bb->last_ins = NULL;
4662 first_bb->in_count = first_bb->out_count = 0;
4663 cfg->cbb = first_bb;
4670 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4680 #ifdef MONO_ARCH_SOFT_FLOAT
4683 * mono_decompose_soft_float:
4685 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4686 * similar to long support on 32 bit platforms. 32 bit float values require special
4687 * handling when used as locals, arguments, and in calls.
4688 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4691 mono_decompose_soft_float (MonoCompile *cfg)
4693 MonoBasicBlock *bb, *first_bb;
4696 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4700 * Create a dummy bblock and emit code into it so we can use the normal
4701 * code generation macros.
4703 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4704 first_bb = cfg->cbb;
4706 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4708 MonoInst *prev = NULL;
4711 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4713 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4719 for (ins = bb->code; ins; ins = ins->next) {
4720 const char *spec = INS_INFO (ins->opcode);
4722 /* Most fp operations are handled automatically by opcode emulation */
4724 switch (ins->opcode) {
/* r8 constant: reinterpret the 64 bits as an i8 constant. */
4727 d.vald = *(double*)ins->inst_p0;
4728 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4733 /* We load the r8 value */
/* r4 constant: widen to double, then emit as an i8 constant. */
4734 d.vald = *(float*)ins->inst_p0;
4735 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp move becomes a long (vreg-pair) move. */
4739 ins->opcode = OP_LMOVE;
/* Moves of a single half of the fp vreg pair — presumably
 * sreg+1/sreg+2 select the two 32-bit halves; confirm the vreg
 * pairing convention in mini.h. */
4742 ins->opcode = OP_MOVE;
4743 ins->sreg1 = ins->sreg1 + 1;
4746 ins->opcode = OP_MOVE;
4747 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: set the long return register pair. */
4750 int reg = ins->sreg1;
4752 ins->opcode = OP_SETLRET;
4754 ins->sreg1 = reg + 1;
4755 ins->sreg2 = reg + 2;
/* r8 loads/stores are just 64-bit integer loads/stores. */
4758 case OP_LOADR8_MEMBASE:
4759 ins->opcode = OP_LOADI8_MEMBASE;
4761 case OP_STORER8_MEMBASE_REG:
4762 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: needs an r8->r4 conversion, done by the
 * mono_fstore_r4 icall taking the value and a destination address. */
4764 case OP_STORER4_MEMBASE_REG: {
4765 MonoInst *iargs [2];
4768 /* Arg 1 is the double value */
4769 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4770 iargs [0]->dreg = ins->sreg1;
4772 /* Arg 2 is the address to store to */
4773 addr_reg = mono_alloc_preg (cfg);
4774 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4775 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: mono_fload_r4 reads the float and returns it as r8. */
4779 case OP_LOADR4_MEMBASE: {
4780 MonoInst *iargs [1];
4784 addr_reg = mono_alloc_preg (cfg);
4785 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4786 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4787 conv->dreg = ins->dreg;
4792 case OP_FCALL_MEMBASE: {
4793 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4: rewrite to an int-returning call, then
 * convert the raw bits to r8 via mono_fload_r4. */
4794 if (call->signature->ret->type == MONO_TYPE_R4) {
4795 MonoCallInst *call2;
4796 MonoInst *iargs [1];
4799 /* Convert the call into a call returning an int */
4800 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4801 memcpy (call2, call, sizeof (MonoCallInst));
4802 switch (ins->opcode) {
4804 call2->inst.opcode = OP_CALL;
4807 call2->inst.opcode = OP_CALL_REG;
4809 case OP_FCALL_MEMBASE:
4810 call2->inst.opcode = OP_CALL_MEMBASE;
4813 g_assert_not_reached ();
4815 call2->inst.dreg = mono_alloc_ireg (cfg);
4816 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4818 /* FIXME: Optimize this */
4820 /* Emit an r4->r8 conversion */
4821 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4822 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4823 conv->dreg = ins->dreg;
/* Calls returning r8: just retype to the long-call variants. */
4825 switch (ins->opcode) {
4827 ins->opcode = OP_LCALL;
4830 ins->opcode = OP_LCALL_REG;
4832 case OP_FCALL_MEMBASE:
4833 ins->opcode = OP_LCALL_MEMBASE;
4836 g_assert_not_reached ();
4842 MonoJitICallInfo *info;
4843 MonoInst *iargs [2];
4844 MonoInst *call, *cmp, *br;
4846 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is looked up from the *branch* opcode that
 * follows the compare. */
4848 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4851 /* Create dummy MonoInst's for the arguments */
4852 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4853 iargs [0]->dreg = ins->sreg1;
4854 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4855 iargs [1]->dreg = ins->sreg2;
4857 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4859 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4860 cmp->sreg1 = call->dreg;
4862 MONO_ADD_INS (cfg->cbb, cmp);
4864 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4865 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4866 br->inst_true_bb = ins->next->inst_true_bb;
4867 br->inst_false_bb = ins->next->inst_false_bb;
4868 MONO_ADD_INS (cfg->cbb, br);
4870 /* The call sequence might include fp ins */
4873 /* Skip fbcc or fccc */
4874 NULLIFY_INS (ins->next);
4882 MonoJitICallInfo *info;
4883 MonoInst *iargs [2];
4886 /* Convert fccc to icall+icompare+iceq */
4888 info = mono_find_jit_opcode_emulation (ins->opcode);
4891 /* Create dummy MonoInst's for the arguments */
4892 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4893 iargs [0]->dreg = ins->sreg1;
4894 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4895 iargs [1]->dreg = ins->sreg2;
4897 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Compare the icall result against 1 and materialize the boolean. */
4899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4900 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4902 /* The call sequence might include fp ins */
/* After decomposition no instruction may still use an fp vreg. */
4907 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4908 mono_print_ins (ins);
4909 g_assert_not_reached ();
4914 g_assert (cfg->cbb == first_bb);
4916 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4917 /* Replace the original instruction with the new code sequence */
4919 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4920 first_bb->code = first_bb->last_ins = NULL;
4921 first_bb->in_count = first_bb->out_count = 0;
4922 cfg->cbb = first_bb;
4929 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, so run the long decomposition now. */
4932 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *sp into local @n.  When the store would be a plain
 * reg-reg move and the source is a constant (OP_ICONST/OP_I8CONST),
 * retarget the constant's dreg to the local's vreg instead of emitting
 * a move.  Other source opcodes are not coalesced because sp[0] might
 * be the last ins of a decomposed opcode.
 */
4938 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4941 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4942 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4943 /* Optimize reg-reg moves away */
4945 * Can't optimize other opcodes, since sp[0] might point to
4946 * the last ins of a decomposed opcode.
4948 sp [0]->dreg = (cfg)->locals [n]->dreg;
4950 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4955 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the
 * local's address, store NULL directly for reference types or emit a
 * VZERO for value types, eliminating the address exposure.  Returns
 * the new ip past the consumed sequence on success (return paths are
 * elided in this view) so the caller can skip the initobj.
 */
4958 static inline unsigned char *
4959 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4968 local = read16 (ip + 2);
/* Only fires when the following initobj is in the same bblock. */
4972 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
4973 gboolean skip = FALSE;
4975 /* From the INITOBJ case */
4976 token = read32 (ip + 2);
4977 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
4978 CHECK_TYPELOAD (klass);
4979 if (generic_class_is_reference_type (cfg, klass)) {
4980 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
4981 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
4982 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
4983 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
4984 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
4997 * mono_method_to_ir: translates IL into basic blocks containing trees
5000 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5001 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5002 guint inline_offset, gboolean is_virtual_call)
5004 MonoInst *ins, **sp, **stack_start;
5005 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5006 MonoMethod *cmethod, *method_definition;
5007 MonoInst **arg_array;
5008 MonoMethodHeader *header;
5010 guint32 token, ins_flag;
5012 MonoClass *constrained_call = NULL;
5013 unsigned char *ip, *end, *target, *err_pos;
5014 static double r8_0 = 0.0;
5015 MonoMethodSignature *sig;
5016 MonoGenericContext *generic_context = NULL;
5017 MonoGenericContainer *generic_container = NULL;
5018 MonoType **param_types;
5019 int i, n, start_new_bblock, dreg;
5020 int num_calls = 0, inline_costs = 0;
5021 int breakpoint_id = 0;
5023 MonoBoolean security, pinvoke;
5024 MonoSecurityManager* secman = NULL;
5025 MonoDeclSecurityActions actions;
5026 GSList *class_inits = NULL;
5027 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5030 /* serialization and xdomain stuff may need access to private fields and methods */
5031 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5032 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5033 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5034 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5035 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5036 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5038 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5040 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5041 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5042 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5043 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5045 image = method->klass->image;
5046 header = mono_method_get_header (method);
5047 generic_container = mono_method_get_generic_container (method);
5048 sig = mono_method_signature (method);
5049 num_args = sig->hasthis + sig->param_count;
5050 ip = (unsigned char*)header->code;
5051 cfg->cil_start = ip;
5052 end = ip + header->code_size;
5053 mono_jit_stats.cil_code_size += header->code_size;
5055 method_definition = method;
5056 while (method_definition->is_inflated) {
5057 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5058 method_definition = imethod->declaring;
5061 /* SkipVerification is not allowed if core-clr is enabled */
5062 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5064 dont_verify_stloc = TRUE;
5067 if (!dont_verify && mini_method_verify (cfg, method_definition))
5068 goto exception_exit;
5070 if (mono_debug_using_mono_debugger ())
5071 cfg->keep_cil_nops = TRUE;
5073 if (sig->is_inflated)
5074 generic_context = mono_method_get_context (method);
5075 else if (generic_container)
5076 generic_context = &generic_container->context;
5077 cfg->generic_context = generic_context;
5079 if (!cfg->generic_sharing_context)
5080 g_assert (!sig->has_type_parameters);
5082 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5083 g_assert (method->is_inflated);
5084 g_assert (mono_method_get_context (method)->method_inst);
5086 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5087 g_assert (sig->generic_param_count);
5089 if (cfg->method == method) {
5090 cfg->real_offset = 0;
5092 cfg->real_offset = inline_offset;
5095 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5096 cfg->cil_offset_to_bb_len = header->code_size;
5098 cfg->current_method = method;
5100 if (cfg->verbose_level > 2)
5101 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5103 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5105 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5106 for (n = 0; n < sig->param_count; ++n)
5107 param_types [n + sig->hasthis] = sig->params [n];
5108 cfg->arg_types = param_types;
5110 dont_inline = g_list_prepend (dont_inline, method);
5111 if (cfg->method == method) {
5113 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5114 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5117 NEW_BBLOCK (cfg, start_bblock);
5118 cfg->bb_entry = start_bblock;
5119 start_bblock->cil_code = NULL;
5120 start_bblock->cil_length = 0;
5123 NEW_BBLOCK (cfg, end_bblock);
5124 cfg->bb_exit = end_bblock;
5125 end_bblock->cil_code = NULL;
5126 end_bblock->cil_length = 0;
5127 g_assert (cfg->num_bblocks == 2);
5129 arg_array = cfg->args;
5131 if (header->num_clauses) {
5132 cfg->spvars = g_hash_table_new (NULL, NULL);
5133 cfg->exvars = g_hash_table_new (NULL, NULL);
5135 /* handle exception clauses */
5136 for (i = 0; i < header->num_clauses; ++i) {
5137 MonoBasicBlock *try_bb;
5138 MonoExceptionClause *clause = &header->clauses [i];
5139 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5140 try_bb->real_offset = clause->try_offset;
5141 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5142 tblock->real_offset = clause->handler_offset;
5143 tblock->flags |= BB_EXCEPTION_HANDLER;
5145 link_bblock (cfg, try_bb, tblock);
5147 if (*(ip + clause->handler_offset) == CEE_POP)
5148 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5150 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5151 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5152 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5153 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5154 MONO_ADD_INS (tblock, ins);
5156 /* todo: is a fault block unsafe to optimize? */
5157 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5158 tblock->flags |= BB_EXCEPTION_UNSAFE;
5162 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5164 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5166 /* catch and filter blocks get the exception object on the stack */
5167 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5168 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5169 MonoInst *dummy_use;
5171 /* mostly like handle_stack_args (), but just sets the input args */
5172 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5173 tblock->in_scount = 1;
5174 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5175 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5178 * Add a dummy use for the exvar so its liveness info will be
5182 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5184 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5185 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5186 tblock->real_offset = clause->data.filter_offset;
5187 tblock->in_scount = 1;
5188 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5189 /* The filter block shares the exvar with the handler block */
5190 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5191 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5192 MONO_ADD_INS (tblock, ins);
5196 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5197 clause->data.catch_class &&
5198 cfg->generic_sharing_context &&
5199 mono_class_check_context_used (clause->data.catch_class)) {
5200 if (mono_method_get_context (method)->method_inst)
5201 GENERIC_SHARING_FAILURE (CEE_NOP);
5204 * In shared generic code with catch
5205 * clauses containing type variables
5206 * the exception handling code has to
5207 * be able to get to the rgctx.
5208 * Therefore we have to make sure that
5209 * the vtable/mrgctx argument (for
5210 * static or generic methods) or the
5211 * "this" argument (for non-static
5212 * methods) are live.
5214 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5215 mini_method_get_context (method)->method_inst ||
5216 method->klass->valuetype) {
5217 mono_get_vtable_var (cfg);
5219 MonoInst *dummy_use;
5221 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5226 arg_array = alloca (sizeof (MonoInst *) * num_args);
5227 cfg->cbb = start_bblock;
5228 cfg->args = arg_array;
5229 mono_save_args (cfg, sig, inline_args);
5232 /* FIRST CODE BLOCK */
5233 NEW_BBLOCK (cfg, bblock);
5234 bblock->cil_code = ip;
5238 ADD_BBLOCK (cfg, bblock);
5240 if (cfg->method == method) {
5241 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5242 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5243 MONO_INST_NEW (cfg, ins, OP_BREAK);
5244 MONO_ADD_INS (bblock, ins);
5248 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5249 secman = mono_security_manager_get_methods ();
5251 security = (secman && mono_method_has_declsec (method));
5252 /* at this point having security doesn't mean we have any code to generate */
5253 if (security && (cfg->method == method)) {
5254 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5255 * And we do not want to enter the next section (with allocation) if we
5256 * have nothing to generate */
5257 security = mono_declsec_get_demands (method, &actions);
5260 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5261 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5263 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5264 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5265 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5267 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5268 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5272 mono_custom_attrs_free (custom);
5275 custom = mono_custom_attrs_from_class (wrapped->klass);
5276 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5280 mono_custom_attrs_free (custom);
5283 /* not a P/Invoke after all */
5288 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5289 /* we use a separate basic block for the initialization code */
5290 NEW_BBLOCK (cfg, init_localsbb);
5291 cfg->bb_init = init_localsbb;
5292 init_localsbb->real_offset = cfg->real_offset;
5293 start_bblock->next_bb = init_localsbb;
5294 init_localsbb->next_bb = bblock;
5295 link_bblock (cfg, start_bblock, init_localsbb);
5296 link_bblock (cfg, init_localsbb, bblock);
5298 cfg->cbb = init_localsbb;
5300 start_bblock->next_bb = bblock;
5301 link_bblock (cfg, start_bblock, bblock);
5304 /* at this point we know, if security is TRUE, that some code needs to be generated */
5305 if (security && (cfg->method == method)) {
5308 mono_jit_stats.cas_demand_generation++;
5310 if (actions.demand.blob) {
5311 /* Add code for SecurityAction.Demand */
5312 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5313 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5314 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5315 mono_emit_method_call (cfg, secman->demand, args, NULL);
5317 if (actions.noncasdemand.blob) {
5318 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5319 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5320 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5321 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5322 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5323 mono_emit_method_call (cfg, secman->demand, args, NULL);
5325 if (actions.demandchoice.blob) {
5326 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5327 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5328 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5329 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5330 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5334 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5336 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5339 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5340 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5341 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5342 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5343 if (!(method->klass && method->klass->image &&
5344 mono_security_core_clr_is_platform_image (method->klass->image))) {
5345 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5349 if (!method_is_safe (method))
5350 emit_throw_verification_exception (cfg, bblock, ip);
5353 if (header->code_size == 0)
5356 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5361 if (cfg->method == method)
5362 mono_debug_init_method (cfg, bblock, breakpoint_id);
5364 for (n = 0; n < header->num_locals; ++n) {
5365 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5370 /* add a check for this != NULL to inlined methods */
5371 if (is_virtual_call) {
5374 NEW_ARGLOAD (cfg, arg_ins, 0);
5375 MONO_ADD_INS (cfg->cbb, arg_ins);
5376 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5377 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5378 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5381 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5382 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5385 start_new_bblock = 0;
5389 if (cfg->method == method)
5390 cfg->real_offset = ip - header->code;
5392 cfg->real_offset = inline_offset;
5397 if (start_new_bblock) {
5398 bblock->cil_length = ip - bblock->cil_code;
5399 if (start_new_bblock == 2) {
5400 g_assert (ip == tblock->cil_code);
5402 GET_BBLOCK (cfg, tblock, ip);
5404 bblock->next_bb = tblock;
5407 start_new_bblock = 0;
5408 for (i = 0; i < bblock->in_scount; ++i) {
5409 if (cfg->verbose_level > 3)
5410 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5411 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5415 g_slist_free (class_inits);
5418 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5419 link_bblock (cfg, bblock, tblock);
5420 if (sp != stack_start) {
5421 handle_stack_args (cfg, stack_start, sp - stack_start);
5423 CHECK_UNVERIFIABLE (cfg);
5425 bblock->next_bb = tblock;
5428 for (i = 0; i < bblock->in_scount; ++i) {
5429 if (cfg->verbose_level > 3)
5430 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5431 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5434 g_slist_free (class_inits);
5439 bblock->real_offset = cfg->real_offset;
5441 if ((cfg->method == method) && cfg->coverage_info) {
5442 guint32 cil_offset = ip - header->code;
5443 cfg->coverage_info->data [cil_offset].cil_code = ip;
5445 /* TODO: Use an increment here */
5446 #if defined(__i386__)
5447 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5448 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5450 MONO_ADD_INS (cfg->cbb, ins);
5452 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5453 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5457 if (cfg->verbose_level > 3)
5458 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5462 if (cfg->keep_cil_nops)
5463 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5465 MONO_INST_NEW (cfg, ins, OP_NOP);
5467 MONO_ADD_INS (bblock, ins);
5470 MONO_INST_NEW (cfg, ins, OP_BREAK);
5472 MONO_ADD_INS (bblock, ins);
5478 CHECK_STACK_OVF (1);
5479 n = (*ip)-CEE_LDARG_0;
5481 EMIT_NEW_ARGLOAD (cfg, ins, n);
5489 CHECK_STACK_OVF (1);
5490 n = (*ip)-CEE_LDLOC_0;
5492 EMIT_NEW_LOCLOAD (cfg, ins, n);
5501 n = (*ip)-CEE_STLOC_0;
5504 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5506 emit_stloc_ir (cfg, sp, header, n);
5513 CHECK_STACK_OVF (1);
5516 EMIT_NEW_ARGLOAD (cfg, ins, n);
5522 CHECK_STACK_OVF (1);
5525 NEW_ARGLOADA (cfg, ins, n);
5526 MONO_ADD_INS (cfg->cbb, ins);
5536 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5538 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5543 CHECK_STACK_OVF (1);
5546 EMIT_NEW_LOCLOAD (cfg, ins, n);
5550 case CEE_LDLOCA_S: {
5551 unsigned char *tmp_ip;
5553 CHECK_STACK_OVF (1);
5554 CHECK_LOCAL (ip [1]);
5556 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5562 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5571 CHECK_LOCAL (ip [1]);
5572 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5574 emit_stloc_ir (cfg, sp, header, ip [1]);
5579 CHECK_STACK_OVF (1);
5580 EMIT_NEW_PCONST (cfg, ins, NULL);
5581 ins->type = STACK_OBJ;
5586 CHECK_STACK_OVF (1);
5587 EMIT_NEW_ICONST (cfg, ins, -1);
5600 CHECK_STACK_OVF (1);
5601 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5607 CHECK_STACK_OVF (1);
5609 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5615 CHECK_STACK_OVF (1);
5616 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5622 CHECK_STACK_OVF (1);
5623 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5624 ins->type = STACK_I8;
5625 ins->dreg = alloc_dreg (cfg, STACK_I8);
5627 ins->inst_l = (gint64)read64 (ip);
5628 MONO_ADD_INS (bblock, ins);
5634 /* FIXME: we should really allocate this only late in the compilation process */
5635 mono_domain_lock (cfg->domain);
5636 f = mono_domain_alloc (cfg->domain, sizeof (float));
5637 mono_domain_unlock (cfg->domain);
5639 CHECK_STACK_OVF (1);
5640 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5641 ins->type = STACK_R8;
5642 ins->dreg = alloc_dreg (cfg, STACK_R8);
5646 MONO_ADD_INS (bblock, ins);
5654 /* FIXME: we should really allocate this only late in the compilation process */
5655 mono_domain_lock (cfg->domain);
5656 d = mono_domain_alloc (cfg->domain, sizeof (double));
5657 mono_domain_unlock (cfg->domain);
5659 CHECK_STACK_OVF (1);
5660 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5661 ins->type = STACK_R8;
5662 ins->dreg = alloc_dreg (cfg, STACK_R8);
5666 MONO_ADD_INS (bblock, ins);
5673 MonoInst *temp, *store;
5675 CHECK_STACK_OVF (1);
5679 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5680 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5682 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5685 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5698 if (sp [0]->type == STACK_R8)
5699 /* we need to pop the value from the x86 FP stack */
5700 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5707 if (stack_start != sp)
5709 token = read32 (ip + 1);
5710 /* FIXME: check the signature matches */
5711 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5716 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5717 GENERIC_SHARING_FAILURE (CEE_JMP);
5719 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5720 if (check_linkdemand (cfg, method, cmethod))
5722 CHECK_CFG_EXCEPTION;
5727 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5730 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5732 /* Handle tail calls similarly to calls */
5733 n = fsig->param_count + fsig->hasthis;
5735 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5736 call->method = cmethod;
5737 call->tail_call = TRUE;
5738 call->signature = mono_method_signature (cmethod);
5739 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5740 call->inst.inst_p0 = cmethod;
5741 for (i = 0; i < n; ++i)
5742 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5744 mono_arch_emit_call (cfg, call);
5745 MONO_ADD_INS (bblock, (MonoInst*)call);
5748 for (i = 0; i < num_args; ++i)
5749 /* Prevent arguments from being optimized away */
5750 arg_array [i]->flags |= MONO_INST_VOLATILE;
5752 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5753 ins = (MonoInst*)call;
5754 ins->inst_p0 = cmethod;
5755 MONO_ADD_INS (bblock, ins);
5759 start_new_bblock = 1;
5764 case CEE_CALLVIRT: {
5765 MonoInst *addr = NULL;
5766 MonoMethodSignature *fsig = NULL;
5768 int virtual = *ip == CEE_CALLVIRT;
5769 int calli = *ip == CEE_CALLI;
5770 gboolean pass_imt_from_rgctx = FALSE;
5771 MonoInst *imt_arg = NULL;
5772 gboolean pass_vtable = FALSE;
5773 gboolean pass_mrgctx = FALSE;
5774 MonoInst *vtable_arg = NULL;
5775 gboolean check_this = FALSE;
5778 token = read32 (ip + 1);
5785 if (method->wrapper_type != MONO_WRAPPER_NONE)
5786 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5788 fsig = mono_metadata_parse_signature (image, token);
5790 n = fsig->param_count + fsig->hasthis;
5792 MonoMethod *cil_method;
5794 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5795 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5796 cil_method = cmethod;
5797 } else if (constrained_call) {
5798 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5800 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5801 cil_method = cmethod;
5806 if (!dont_verify && !cfg->skip_visibility) {
5807 MonoMethod *target_method = cil_method;
5808 if (method->is_inflated) {
5809 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5811 if (!mono_method_can_access_method (method_definition, target_method) &&
5812 !mono_method_can_access_method (method, cil_method))
5813 METHOD_ACCESS_FAILURE;
5816 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5817 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5819 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5820 /* MS.NET seems to silently convert this to a callvirt */
5823 if (!cmethod->klass->inited)
5824 if (!mono_class_init (cmethod->klass))
5827 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5828 mini_class_is_system_array (cmethod->klass)) {
5829 array_rank = cmethod->klass->rank;
5830 fsig = mono_method_signature (cmethod);
5832 if (mono_method_signature (cmethod)->pinvoke) {
5833 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5834 check_for_pending_exc, FALSE);
5835 fsig = mono_method_signature (wrapper);
5836 } else if (constrained_call) {
5837 fsig = mono_method_signature (cmethod);
5839 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5843 mono_save_token_info (cfg, image, token, cil_method);
5845 n = fsig->param_count + fsig->hasthis;
5847 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5848 if (check_linkdemand (cfg, method, cmethod))
5850 CHECK_CFG_EXCEPTION;
5853 if (cmethod->string_ctor)
5854 g_assert_not_reached ();
5857 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5860 if (!cfg->generic_sharing_context && cmethod)
5861 g_assert (!mono_method_check_context_used (cmethod));
5865 //g_assert (!virtual || fsig->hasthis);
5869 if (constrained_call) {
5871 * We have the `constrained.' prefix opcode.
5873 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5877 * The type parameter is instantiated as a valuetype,
5878 * but that type doesn't override the method we're
5879 * calling, so we need to box `this'.
5881 dreg = alloc_dreg (cfg, STACK_VTYPE);
5882 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5883 ins->klass = constrained_call;
5884 sp [0] = handle_box (cfg, ins, constrained_call);
5885 } else if (!constrained_call->valuetype) {
5886 int dreg = alloc_preg (cfg);
5889 * The type parameter is instantiated as a reference
5890 * type. We have a managed pointer on the stack, so
5891 * we need to dereference it here.
5893 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5894 ins->type = STACK_OBJ;
5896 } else if (cmethod->klass->valuetype)
5898 constrained_call = NULL;
5901 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5905 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5906 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5907 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5908 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5909 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5912 * Pass vtable iff target method might
5913 * be shared, which means that sharing
5914 * is enabled for its class and its
5915 * context is sharable (and it's not a
5918 if (sharing_enabled && context_sharable &&
5919 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5923 if (cmethod && mini_method_get_context (cmethod) &&
5924 mini_method_get_context (cmethod)->method_inst) {
5925 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5926 MonoGenericContext *context = mini_method_get_context (cmethod);
5927 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5929 g_assert (!pass_vtable);
5931 if (sharing_enabled && context_sharable)
5935 if (cfg->generic_sharing_context && cmethod) {
5936 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5938 context_used = mono_method_check_context_used (cmethod);
5940 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5941 /* Generic method interface
5942 calls are resolved via a
5943 helper function and don't
5945 if (!cmethod_context || !cmethod_context->method_inst)
5946 pass_imt_from_rgctx = TRUE;
5950 * If a shared method calls another
5951 * shared method then the caller must
5952 * have a generic sharing context
5953 * because the magic trampoline
5954 * requires it. FIXME: We shouldn't
5955 * have to force the vtable/mrgctx
5956 * variable here. Instead there
5957 * should be a flag in the cfg to
5958 * request a generic sharing context.
5961 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5962 mono_get_vtable_var (cfg);
5967 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5969 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5971 CHECK_TYPELOAD (cmethod->klass);
5972 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5977 g_assert (!vtable_arg);
5980 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5982 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5985 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5986 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5993 if (pass_imt_from_rgctx) {
5994 g_assert (!pass_vtable);
5997 imt_arg = emit_get_rgctx_method (cfg, context_used,
5998 cmethod, MONO_RGCTX_INFO_METHOD);
6004 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6005 check->sreg1 = sp [0]->dreg;
6006 MONO_ADD_INS (cfg->cbb, check);
6009 /* Calling virtual generic methods */
6010 if (cmethod && virtual &&
6011 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6012 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
6013 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6014 mono_method_signature (cmethod)->generic_param_count) {
6015 MonoInst *this_temp, *this_arg_temp, *store;
6016 MonoInst *iargs [4];
6018 g_assert (mono_method_signature (cmethod)->is_inflated);
6020 /* Prevent inlining of methods that contain indirect calls */
6023 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6024 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6025 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6026 g_assert (!imt_arg);
6028 imt_arg = emit_get_rgctx_method (cfg, context_used,
6029 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6032 g_assert (!cfg->compile_aot);
6033 g_assert (cmethod->is_inflated);
6034 EMIT_NEW_PCONST (cfg, imt_arg,
6035 ((MonoMethodInflated*)cmethod)->context.method_inst);
6037 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6041 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6042 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6043 MONO_ADD_INS (bblock, store);
6045 /* FIXME: This should be a managed pointer */
6046 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6048 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6050 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6051 cmethod, MONO_RGCTX_INFO_METHOD);
6052 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6053 addr = mono_emit_jit_icall (cfg,
6054 mono_helper_compile_generic_method, iargs);
6056 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6057 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6058 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6061 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6063 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6066 if (!MONO_TYPE_IS_VOID (fsig->ret))
6075 /* FIXME: runtime generic context pointer for jumps? */
6076 /* FIXME: handle this for generic sharing eventually */
6077 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6078 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6081 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6084 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6085 call->tail_call = TRUE;
6086 call->method = cmethod;
6087 call->signature = mono_method_signature (cmethod);
6090 /* Handle tail calls similarly to calls */
6091 call->inst.opcode = OP_TAILCALL;
6093 mono_arch_emit_call (cfg, call);
6096 * We implement tail calls by storing the actual arguments into the
6097 * argument variables, then emitting a CEE_JMP.
6099 for (i = 0; i < n; ++i) {
6100 /* Prevent argument from being register allocated */
6101 arg_array [i]->flags |= MONO_INST_VOLATILE;
6102 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6106 ins = (MonoInst*)call;
6107 ins->inst_p0 = cmethod;
6108 ins->inst_p1 = arg_array [0];
6109 MONO_ADD_INS (bblock, ins);
6110 link_bblock (cfg, bblock, end_bblock);
6111 start_new_bblock = 1;
6112 /* skip CEE_RET as well */
6118 /* Conversion to a JIT intrinsic */
6119 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6120 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6121 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6132 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6133 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6134 mono_method_check_inlining (cfg, cmethod) &&
6135 !g_list_find (dont_inline, cmethod)) {
6137 gboolean allways = FALSE;
6139 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6140 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6141 /* Prevent inlining of methods that call wrappers */
6143 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6147 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6149 cfg->real_offset += 5;
6152 if (!MONO_TYPE_IS_VOID (fsig->ret))
6153 /* *sp is already set by inline_method */
6156 inline_costs += costs;
6162 inline_costs += 10 * num_calls++;
6164 /* Tail recursion elimination */
6165 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6166 gboolean has_vtargs = FALSE;
6169 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6172 /* keep it simple */
6173 for (i = fsig->param_count - 1; i >= 0; i--) {
6174 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6179 for (i = 0; i < n; ++i)
6180 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6181 MONO_INST_NEW (cfg, ins, OP_BR);
6182 MONO_ADD_INS (bblock, ins);
6183 tblock = start_bblock->out_bb [0];
6184 link_bblock (cfg, bblock, tblock);
6185 ins->inst_target_bb = tblock;
6186 start_new_bblock = 1;
6188 /* skip the CEE_RET, too */
6189 if (ip_in_bb (cfg, bblock, ip + 5))
6199 /* Generic sharing */
6200 /* FIXME: only do this for generic methods if
6201 they are not shared! */
6202 if (context_used && !imt_arg && !array_rank &&
6203 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6204 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6205 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6206 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6209 g_assert (cfg->generic_sharing_context && cmethod);
6213 * We are compiling a call to a
6214 * generic method from shared code,
6215 * which means that we have to look up
6216 * the method in the rgctx and do an
6219 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6222 /* Indirect calls */
6224 g_assert (!imt_arg);
6226 if (*ip == CEE_CALL)
6227 g_assert (context_used);
6228 else if (*ip == CEE_CALLI)
6229 g_assert (!vtable_arg);
6231 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6232 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6234 /* Prevent inlining of methods with indirect calls */
6238 #ifdef MONO_ARCH_RGCTX_REG
6240 int rgctx_reg = mono_alloc_preg (cfg);
6242 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6243 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6244 call = (MonoCallInst*)ins;
6245 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6246 cfg->uses_rgctx_reg = TRUE;
6251 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6253 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6254 if (fsig->pinvoke && !fsig->ret->byref) {
6258 * Native code might return non register sized integers
6259 * without initializing the upper bits.
6261 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6262 case OP_LOADI1_MEMBASE:
6263 widen_op = OP_ICONV_TO_I1;
6265 case OP_LOADU1_MEMBASE:
6266 widen_op = OP_ICONV_TO_U1;
6268 case OP_LOADI2_MEMBASE:
6269 widen_op = OP_ICONV_TO_I2;
6271 case OP_LOADU2_MEMBASE:
6272 widen_op = OP_ICONV_TO_U2;
6278 if (widen_op != -1) {
6279 int dreg = alloc_preg (cfg);
6282 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6283 widen->type = ins->type;
6300 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6301 if (sp [fsig->param_count]->type == STACK_OBJ) {
6302 MonoInst *iargs [2];
6305 iargs [1] = sp [fsig->param_count];
6307 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6310 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6311 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6312 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6313 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6315 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6318 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6319 if (!cmethod->klass->element_class->valuetype && !readonly)
6320 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6323 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6326 g_assert_not_reached ();
6334 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6336 if (!MONO_TYPE_IS_VOID (fsig->ret))
6347 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6349 } else if (imt_arg) {
6350 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6352 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6355 if (!MONO_TYPE_IS_VOID (fsig->ret))
6363 if (cfg->method != method) {
6364 /* return from inlined method */
6366 * If in_count == 0, that means the ret is unreachable due to
6367 * being preceeded by a throw. In that case, inline_method () will
6368 * handle setting the return value
6369 * (test case: test_0_inline_throw ()).
6371 if (return_var && cfg->cbb->in_count) {
6375 //g_assert (returnvar != -1);
6376 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6377 cfg->ret_var_set = TRUE;
6381 MonoType *ret_type = mono_method_signature (method)->ret;
6383 g_assert (!return_var);
6386 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6389 if (!cfg->vret_addr) {
6392 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6394 EMIT_NEW_RETLOADA (cfg, ret_addr);
6396 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6397 ins->klass = mono_class_from_mono_type (ret_type);
6400 #ifdef MONO_ARCH_SOFT_FLOAT
6401 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6402 MonoInst *iargs [1];
6406 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6407 mono_arch_emit_setret (cfg, method, conv);
6409 mono_arch_emit_setret (cfg, method, *sp);
6412 mono_arch_emit_setret (cfg, method, *sp);
6417 if (sp != stack_start)
6419 MONO_INST_NEW (cfg, ins, OP_BR);
6421 ins->inst_target_bb = end_bblock;
6422 MONO_ADD_INS (bblock, ins);
6423 link_bblock (cfg, bblock, end_bblock);
6424 start_new_bblock = 1;
6428 MONO_INST_NEW (cfg, ins, OP_BR);
6430 target = ip + 1 + (signed char)(*ip);
6432 GET_BBLOCK (cfg, tblock, target);
6433 link_bblock (cfg, bblock, tblock);
6434 ins->inst_target_bb = tblock;
6435 if (sp != stack_start) {
6436 handle_stack_args (cfg, stack_start, sp - stack_start);
6438 CHECK_UNVERIFIABLE (cfg);
6440 MONO_ADD_INS (bblock, ins);
6441 start_new_bblock = 1;
6442 inline_costs += BRANCH_COST;
6456 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6458 target = ip + 1 + *(signed char*)ip;
6464 inline_costs += BRANCH_COST;
6468 MONO_INST_NEW (cfg, ins, OP_BR);
6471 target = ip + 4 + (gint32)read32(ip);
6473 GET_BBLOCK (cfg, tblock, target);
6474 link_bblock (cfg, bblock, tblock);
6475 ins->inst_target_bb = tblock;
6476 if (sp != stack_start) {
6477 handle_stack_args (cfg, stack_start, sp - stack_start);
6479 CHECK_UNVERIFIABLE (cfg);
6482 MONO_ADD_INS (bblock, ins);
6484 start_new_bblock = 1;
6485 inline_costs += BRANCH_COST;
6492 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6493 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6494 guint32 opsize = is_short ? 1 : 4;
6496 CHECK_OPSIZE (opsize);
6498 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6501 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6506 GET_BBLOCK (cfg, tblock, target);
6507 link_bblock (cfg, bblock, tblock);
6508 GET_BBLOCK (cfg, tblock, ip);
6509 link_bblock (cfg, bblock, tblock);
6511 if (sp != stack_start) {
6512 handle_stack_args (cfg, stack_start, sp - stack_start);
6513 CHECK_UNVERIFIABLE (cfg);
6516 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6517 cmp->sreg1 = sp [0]->dreg;
6518 type_from_op (cmp, sp [0], NULL);
6521 #if SIZEOF_VOID_P == 4
6522 if (cmp->opcode == OP_LCOMPARE_IMM) {
6523 /* Convert it to OP_LCOMPARE */
6524 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6525 ins->type = STACK_I8;
6526 ins->dreg = alloc_dreg (cfg, STACK_I8);
6528 MONO_ADD_INS (bblock, ins);
6529 cmp->opcode = OP_LCOMPARE;
6530 cmp->sreg2 = ins->dreg;
6533 MONO_ADD_INS (bblock, cmp);
6535 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6536 type_from_op (ins, sp [0], NULL);
6537 MONO_ADD_INS (bblock, ins);
6538 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6539 GET_BBLOCK (cfg, tblock, target);
6540 ins->inst_true_bb = tblock;
6541 GET_BBLOCK (cfg, tblock, ip);
6542 ins->inst_false_bb = tblock;
6543 start_new_bblock = 2;
6546 inline_costs += BRANCH_COST;
6561 MONO_INST_NEW (cfg, ins, *ip);
6563 target = ip + 4 + (gint32)read32(ip);
6569 inline_costs += BRANCH_COST;
6573 MonoBasicBlock **targets;
6574 MonoBasicBlock *default_bblock;
6575 MonoJumpInfoBBTable *table;
6577 int offset_reg = alloc_preg (cfg);
6578 int target_reg = alloc_preg (cfg);
6579 int table_reg = alloc_preg (cfg);
6580 int sum_reg = alloc_preg (cfg);
6585 n = read32 (ip + 1);
6588 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6592 CHECK_OPSIZE (n * sizeof (guint32));
6593 target = ip + n * sizeof (guint32);
6595 GET_BBLOCK (cfg, default_bblock, target);
6597 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6598 for (i = 0; i < n; ++i) {
6599 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6600 targets [i] = tblock;
6604 if (sp != stack_start) {
6606 * Link the current bb with the targets as well, so handle_stack_args
6607 * will set their in_stack correctly.
6609 link_bblock (cfg, bblock, default_bblock);
6610 for (i = 0; i < n; ++i)
6611 link_bblock (cfg, bblock, targets [i]);
6613 handle_stack_args (cfg, stack_start, sp - stack_start);
6615 CHECK_UNVERIFIABLE (cfg);
6618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6622 for (i = 0; i < n; ++i)
6623 link_bblock (cfg, bblock, targets [i]);
6625 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6626 table->table = targets;
6627 table->table_size = n;
6630 /* ARM implements SWITCH statements differently */
6631 /* FIXME: Make it use the generic implementation */
6632 /* the backend code will deal with aot vs normal case */
6633 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6634 ins->sreg1 = src1->dreg;
6635 ins->inst_p0 = table;
6636 ins->inst_many_bb = targets;
6637 ins->klass = GUINT_TO_POINTER (n);
6638 MONO_ADD_INS (cfg->cbb, ins);
6640 if (sizeof (gpointer) == 8)
6641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6645 #if SIZEOF_VOID_P == 8
6646 /* The upper word might not be zero, and we add it to a 64 bit address later */
6647 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6650 if (cfg->compile_aot) {
6651 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6653 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6654 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6655 ins->inst_p0 = table;
6656 ins->dreg = table_reg;
6657 MONO_ADD_INS (cfg->cbb, ins);
6660 /* FIXME: Use load_memindex */
6661 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6663 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6665 start_new_bblock = 1;
6666 inline_costs += (BRANCH_COST * 2);
6686 dreg = alloc_freg (cfg);
6689 dreg = alloc_lreg (cfg);
6692 dreg = alloc_preg (cfg);
6695 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6696 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6697 ins->flags |= ins_flag;
6699 MONO_ADD_INS (bblock, ins);
6714 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6715 ins->flags |= ins_flag;
6717 MONO_ADD_INS (bblock, ins);
6725 MONO_INST_NEW (cfg, ins, (*ip));
6727 ins->sreg1 = sp [0]->dreg;
6728 ins->sreg2 = sp [1]->dreg;
6729 type_from_op (ins, sp [0], sp [1]);
6731 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6733 /* Use the immediate opcodes if possible */
6734 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6735 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6736 if (imm_opcode != -1) {
6737 ins->opcode = imm_opcode;
6738 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6741 sp [1]->opcode = OP_NOP;
6745 MONO_ADD_INS ((cfg)->cbb, (ins));
6748 mono_decompose_opcode (cfg, ins);
6765 MONO_INST_NEW (cfg, ins, (*ip));
6767 ins->sreg1 = sp [0]->dreg;
6768 ins->sreg2 = sp [1]->dreg;
6769 type_from_op (ins, sp [0], sp [1]);
6771 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6772 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6774 /* FIXME: Pass opcode to is_inst_imm */
6776 /* Use the immediate opcodes if possible */
6777 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6780 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6781 if (imm_opcode != -1) {
6782 ins->opcode = imm_opcode;
6783 if (sp [1]->opcode == OP_I8CONST) {
6784 #if SIZEOF_VOID_P == 8
6785 ins->inst_imm = sp [1]->inst_l;
6787 ins->inst_ls_word = sp [1]->inst_ls_word;
6788 ins->inst_ms_word = sp [1]->inst_ms_word;
6792 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6795 sp [1]->opcode = OP_NOP;
6798 MONO_ADD_INS ((cfg)->cbb, (ins));
6801 mono_decompose_opcode (cfg, ins);
6814 case CEE_CONV_OVF_I8:
6815 case CEE_CONV_OVF_U8:
6819 /* Special case this earlier so we have long constants in the IR */
6820 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6821 int data = sp [-1]->inst_c0;
6822 sp [-1]->opcode = OP_I8CONST;
6823 sp [-1]->type = STACK_I8;
6824 #if SIZEOF_VOID_P == 8
6825 if ((*ip) == CEE_CONV_U8)
6826 sp [-1]->inst_c0 = (guint32)data;
6828 sp [-1]->inst_c0 = data;
6830 sp [-1]->inst_ls_word = data;
6831 if ((*ip) == CEE_CONV_U8)
6832 sp [-1]->inst_ms_word = 0;
6834 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6836 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6843 case CEE_CONV_OVF_I4:
6844 case CEE_CONV_OVF_I1:
6845 case CEE_CONV_OVF_I2:
6846 case CEE_CONV_OVF_I:
6847 case CEE_CONV_OVF_U:
6850 if (sp [-1]->type == STACK_R8) {
6851 ADD_UNOP (CEE_CONV_OVF_I8);
6858 case CEE_CONV_OVF_U1:
6859 case CEE_CONV_OVF_U2:
6860 case CEE_CONV_OVF_U4:
6863 if (sp [-1]->type == STACK_R8) {
6864 ADD_UNOP (CEE_CONV_OVF_U8);
6871 case CEE_CONV_OVF_I1_UN:
6872 case CEE_CONV_OVF_I2_UN:
6873 case CEE_CONV_OVF_I4_UN:
6874 case CEE_CONV_OVF_I8_UN:
6875 case CEE_CONV_OVF_U1_UN:
6876 case CEE_CONV_OVF_U2_UN:
6877 case CEE_CONV_OVF_U4_UN:
6878 case CEE_CONV_OVF_U8_UN:
6879 case CEE_CONV_OVF_I_UN:
6880 case CEE_CONV_OVF_U_UN:
6890 case CEE_ADD_OVF_UN:
6892 case CEE_MUL_OVF_UN:
6894 case CEE_SUB_OVF_UN:
6902 token = read32 (ip + 1);
6903 klass = mini_get_class (method, token, generic_context);
6904 CHECK_TYPELOAD (klass);
6906 if (generic_class_is_reference_type (cfg, klass)) {
6907 MonoInst *store, *load;
6908 int dreg = alloc_preg (cfg);
6910 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6911 load->flags |= ins_flag;
6912 MONO_ADD_INS (cfg->cbb, load);
6914 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6915 store->flags |= ins_flag;
6916 MONO_ADD_INS (cfg->cbb, store);
6918 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6930 token = read32 (ip + 1);
6931 klass = mini_get_class (method, token, generic_context);
6932 CHECK_TYPELOAD (klass);
6934 /* Optimize the common ldobj+stloc combination */
6944 loc_index = ip [5] - CEE_STLOC_0;
6951 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6952 CHECK_LOCAL (loc_index);
6954 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6955 ins->dreg = cfg->locals [loc_index]->dreg;
6961 /* Optimize the ldobj+stobj combination */
6962 /* The reference case ends up being a load+store anyway */
6963 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6968 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6975 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6984 CHECK_STACK_OVF (1);
6986 n = read32 (ip + 1);
6988 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6989 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6990 ins->type = STACK_OBJ;
6993 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6994 MonoInst *iargs [1];
6996 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6997 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6999 if (cfg->opt & MONO_OPT_SHARED) {
7000 MonoInst *iargs [3];
7002 if (cfg->compile_aot) {
7003 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7005 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7006 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7007 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7008 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7009 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7011 if (bblock->out_of_line) {
7012 MonoInst *iargs [2];
7014 if (cfg->method->klass->image == mono_defaults.corlib) {
7016 * Avoid relocations in AOT and save some space by using a
7017 * version of helper_ldstr specialized to mscorlib.
7019 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7020 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7022 /* Avoid creating the string object */
7023 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7024 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7025 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7029 if (cfg->compile_aot) {
7030 NEW_LDSTRCONST (cfg, ins, image, n);
7032 MONO_ADD_INS (bblock, ins);
7035 NEW_PCONST (cfg, ins, NULL);
7036 ins->type = STACK_OBJ;
7037 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
/* Tail of the previous opcode handler (its case label is above this view). */
7039 MONO_ADD_INS (bblock, ins);
/*
 * CEE_NEWOBJ: allocate an instance and run its constructor.
 * NOTE(review): this view is elided — intervening original lines are missing.
 */
7048 MonoInst *iargs [2];
7049 MonoMethodSignature *fsig;
7052 MonoInst *vtable_arg = NULL;
/* Decode the ctor token and resolve the target method and its signature. */
7055 token = read32 (ip + 1);
7056 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7059 fsig = mono_method_get_signature (cmethod, image, token);
7061 mono_save_token_info (cfg, image, token, cmethod);
7063 if (!mono_class_init (cmethod->klass))
/* Determine whether the ctor depends on the generic sharing context. */
7066 if (cfg->generic_sharing_context)
7067 context_used = mono_method_check_context_used (cmethod);
/* Security checks: CAS link demands or CoreCLR caller/callee visibility. */
7069 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7070 if (check_linkdemand (cfg, method, cmethod))
7072 CHECK_CFG_EXCEPTION;
7073 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7074 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/*
 * For shared generic valuetype ctors the callee needs an extra hidden
 * argument: either a method RGCTX or the class vtable.
 */
7077 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7078 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7079 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7081 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7082 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7084 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7088 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7089 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7091 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7093 CHECK_TYPELOAD (cmethod->klass);
7094 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7099 n = fsig->param_count;
7103 * Generate smaller code for the common newobj <exception> instruction in
7104 * argument checking code.
/* Fast path: out-of-line corlib exception ctor with 0-2 string args. */
7106 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7107 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7108 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7109 MonoInst *iargs [3];
7111 g_assert (!vtable_arg);
7115 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
/* One icall per arity; the elided switch dispatches on n. */
7118 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7122 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7127 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7130 g_assert_not_reached ();
7138 /* move the args to allow room for 'this' in the first position */
7144 /* check_call_signature () requires sp[0] to be set */
7145 this_ins.type = STACK_OBJ;
7147 if (check_call_signature (cfg, fsig, sp))
/* System.Array ctors become array-new icalls instead of a real ctor call. */
7152 if (mini_class_is_system_array (cmethod->klass)) {
7154 GENERIC_SHARING_FAILURE (*ip);
7155 g_assert (!context_used);
7156 g_assert (!vtable_arg);
7157 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7159 /* Avoid varargs in the common case */
7160 if (fsig->param_count == 1)
7161 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7162 else if (fsig->param_count == 2)
7163 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7165 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7166 } else if (cmethod->string_ctor) {
7167 g_assert (!context_used);
7168 g_assert (!vtable_arg);
7169 /* we simply pass a null pointer */
7170 EMIT_NEW_PCONST (cfg, *sp, NULL);
7171 /* now call the string ctor */
7172 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7174 MonoInst* callvirt_this_arg = NULL;
/* Valuetype "newobj": zero a stack local and pass its address as 'this'. */
7176 if (cmethod->klass->valuetype) {
7177 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7178 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7179 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7184 * The code generated by mini_emit_virtual_call () expects
7185 * iargs [0] to be a boxed instance, but luckily the vcall
7186 * will be transformed into a normal call there.
/* Shared-generic allocation: fetch the klass/vtable through the RGCTX. */
7188 } else if (context_used) {
7192 if (cfg->opt & MONO_OPT_SHARED)
7193 rgctx_info = MONO_RGCTX_INFO_KLASS;
7195 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7196 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7198 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7201 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7203 CHECK_TYPELOAD (cmethod->klass);
7206 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7207 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7208 * As a workaround, we call class cctors before allocating objects.
7210 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7211 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7212 if (cfg->verbose_level > 2)
7213 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7214 class_inits = g_slist_prepend (class_inits, vtable);
7217 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7222 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7224 /* Now call the actual ctor */
7225 /* Avoid virtual calls to ctors if possible */
7226 if (cmethod->klass->marshalbyref)
7227 callvirt_this_arg = sp [0];
/* Try to inline the ctor; exception-derived classes are never inlined here. */
7229 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7230 mono_method_check_inlining (cfg, cmethod) &&
7231 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7232 !g_list_find (dont_inline, cmethod)) {
7235 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7236 cfg->real_offset += 5;
7239 inline_costs += costs - 5;
7242 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
/* Ctor not directly sharable: call indirectly through an RGCTX code slot. */
7244 } else if (context_used &&
7245 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7246 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7247 MonoInst *cmethod_addr;
7249 g_assert (!callvirt_this_arg);
7251 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7252 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7254 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7257 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7258 callvirt_this_arg, NULL, vtable_arg);
/* Valuetype case: reload the initialized temp and push it on the stack. */
7262 if (alloc == NULL) {
7264 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7265 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
/*
 * CEE_CASTCLASS: cast the object on top of the stack, throwing
 * InvalidCastException on failure.  (Elided fragment.)
 */
7279 token = read32 (ip + 1);
7280 klass = mini_get_class (method, token, generic_context);
7281 CHECK_TYPELOAD (klass);
7282 if (sp [0]->type != STACK_OBJ)
7285 if (cfg->generic_sharing_context)
7286 context_used = mono_class_check_context_used (klass);
/* Shared-generic path: pass the runtime klass from the RGCTX to an icall. */
7295 args [1] = emit_get_rgctx_klass (cfg, context_used,
7296 klass, MONO_RGCTX_INFO_KLASS);
7298 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
/* Remoting proxies and interfaces need the marshal castclass wrapper. */
7302 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7303 MonoMethod *mono_castclass;
7304 MonoInst *iargs [1];
7307 mono_castclass = mono_marshal_get_castclass (klass);
/* The wrapper is always inlined (forced by the TRUE argument). */
7310 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7311 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7312 g_assert (costs > 0);
7315 cfg->real_offset += 5;
7320 inline_costs += costs;
/* Common case: emit the inline type-check sequence directly. */
7323 ins = handle_castclass (cfg, klass, *sp);
/*
 * CEE_ISINST: like castclass but pushes NULL instead of throwing
 * when the object is not an instance of the class.  (Elided fragment.)
 */
7333 token = read32 (ip + 1);
7334 klass = mini_get_class (method, token, generic_context);
7335 CHECK_TYPELOAD (klass);
7336 if (sp [0]->type != STACK_OBJ)
7339 if (cfg->generic_sharing_context)
7340 context_used = mono_class_check_context_used (klass);
/* Shared-generic path: resolve the klass via RGCTX and call the icall. */
7349 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7351 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
/* Remoting proxies and interfaces go through the marshal isinst wrapper. */
7355 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7356 MonoMethod *mono_isinst;
7357 MonoInst *iargs [1];
7360 mono_isinst = mono_marshal_get_isinst (klass);
7363 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7364 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7365 g_assert (costs > 0);
7368 cfg->real_offset += 5;
7373 inline_costs += costs;
/* Common case: inline type-check sequence. */
7376 ins = handle_isinst (cfg, klass, *sp);
7383 case CEE_UNBOX_ANY: {
7387 token = read32 (ip + 1);
7388 klass = mini_get_class (method, token, generic_context);
7389 CHECK_TYPELOAD (klass);
7391 mono_save_token_info (cfg, image, token, klass);
7393 if (cfg->generic_sharing_context)
7394 context_used = mono_class_check_context_used (klass);
7396 if (generic_class_is_reference_type (cfg, klass)) {
7399 MonoInst *iargs [2];
7404 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7405 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7409 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7410 MonoMethod *mono_castclass;
7411 MonoInst *iargs [1];
7414 mono_castclass = mono_marshal_get_castclass (klass);
7417 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7418 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7420 g_assert (costs > 0);
7423 cfg->real_offset += 5;
7427 inline_costs += costs;
7429 ins = handle_castclass (cfg, klass, *sp);
7437 if (mono_class_is_nullable (klass)) {
7438 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7445 ins = handle_unbox (cfg, klass, sp, context_used);
7451 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
/*
 * CEE_BOX: box the valuetype on top of the stack.  Boxing a reference
 * type is a no-op.  (Elided fragment.)
 */
7464 token = read32 (ip + 1);
7465 klass = mini_get_class (method, token, generic_context);
7466 CHECK_TYPELOAD (klass);
7468 mono_save_token_info (cfg, image, token, klass);
7470 if (cfg->generic_sharing_context)
7471 context_used = mono_class_check_context_used (klass);
7473 if (generic_class_is_reference_type (cfg, klass)) {
7479 if (klass == mono_defaults.void_class)
7481 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7483 /* frequent check in generic code: box (struct), brtrue */
/* Peephole: "box; brtrue" on a non-nullable struct is always taken, so
 * emit an unconditional branch and skip the allocation entirely. */
7484 if (!mono_class_is_nullable (klass) &&
7485 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7486 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7488 MONO_INST_NEW (cfg, ins, OP_BR);
7489 if (*ip == CEE_BRTRUE_S) {
7492 target = ip + 1 + (signed char)(*ip);
7497 target = ip + 4 + (gint)(read32 (ip));
7500 GET_BBLOCK (cfg, tblock, target);
7501 link_bblock (cfg, bblock, tblock);
7502 ins->inst_target_bb = tblock;
7503 GET_BBLOCK (cfg, tblock, ip);
7505 * This leads to some inconsistency, since the two bblocks are not
7506 * really connected, but it is needed for handling stack arguments
7507 * correct (See test_0_box_brtrue_opt_regress_81102).
7509 link_bblock (cfg, bblock, tblock);
7510 if (sp != stack_start) {
7511 handle_stack_args (cfg, stack_start, sp - stack_start);
7513 CHECK_UNVERIFIABLE (cfg);
7515 MONO_ADD_INS (bblock, ins);
7516 start_new_bblock = 1;
/* Shared-generic box: obtain klass or vtable through the RGCTX. */
7524 if (cfg->opt & MONO_OPT_SHARED)
7525 rgctx_info = MONO_RGCTX_INFO_KLASS;
7527 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7528 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7529 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7531 *sp++ = handle_box (cfg, val, klass);
/*
 * CEE_UNBOX: push a managed pointer to the valuetype inside a boxed
 * object.  (Elided fragment.)
 */
7542 token = read32 (ip + 1);
7543 klass = mini_get_class (method, token, generic_context);
7544 CHECK_TYPELOAD (klass);
7546 mono_save_token_info (cfg, image, token, klass);
7548 if (cfg->generic_sharing_context)
7549 context_used = mono_class_check_context_used (klass);
/* Nullable<T>: unbox to a value, then push the address of that value. */
7551 if (mono_class_is_nullable (klass)) {
7554 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7555 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7559 ins = handle_unbox (cfg, klass, sp, context_used);
/*
 * CEE_LDFLD / CEE_LDFLDA / CEE_STFLD: instance field access.
 * (Elided fragment.)
 */
7569 MonoClassField *field;
7573 if (*ip == CEE_STFLD) {
7580 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7582 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
/* Resolve the field token; wrappers embed the field directly. */
7585 token = read32 (ip + 1);
7586 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7587 field = mono_method_get_wrapper_data (method, token);
7588 klass = field->parent;
7591 field = mono_field_from_token (image, token, &klass, generic_context);
7595 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7596 FIELD_ACCESS_FAILURE;
7597 mono_class_init (klass);
/* Valuetype field offsets are relative to the data, not the object header. */
7599 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7600 if (*ip == CEE_STFLD) {
7601 if (target_type_is_incompatible (cfg, field->type, sp [1]))
/* Remoting: possibly-proxied targets must store through a wrapper. */
7603 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7604 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7605 MonoInst *iargs [5];
7608 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7609 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7610 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7614 if (cfg->opt & MONO_OPT_INLINE) {
7615 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7616 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7617 g_assert (costs > 0);
7619 cfg->real_offset += 5;
7622 inline_costs += costs;
7624 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
/* Plain store: direct memory write at the field offset. */
7629 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7631 store->flags |= ins_flag;
/* Load path: proxied targets go through ldfld/ldflda wrappers too. */
7638 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7639 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7640 MonoInst *iargs [4];
7643 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7644 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7645 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7646 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7647 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7648 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7650 g_assert (costs > 0);
7652 cfg->real_offset += 5;
7656 inline_costs += costs;
7658 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* ldfld on a vtype on the stack: materialize it as a local to get an address. */
7662 if (sp [0]->type == STACK_VTYPE) {
7665 /* Have to compute the address of the variable */
7667 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7669 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7671 g_assert (var->klass == klass);
7673 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
/* ldflda: push object pointer + offset as a managed pointer. */
7677 if (*ip == CEE_LDFLDA) {
7678 dreg = alloc_preg (cfg);
7680 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7681 ins->klass = mono_class_from_mono_type (field->type);
7682 ins->type = STACK_MP;
/* ldfld: direct typed load from the field offset. */
7687 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7688 load->flags |= ins_flag;
/*
 * CEE_LDSFLD / CEE_LDSFLDA / CEE_STSFLD: static field access.
 * First computes the field address, then emits the load/store.
 * (Elided fragment.)
 */
7699 MonoClassField *field;
7700 gpointer addr = NULL;
7701 gboolean is_special_static;
7704 token = read32 (ip + 1);
7706 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7707 field = mono_method_get_wrapper_data (method, token);
7708 klass = field->parent;
7711 field = mono_field_from_token (image, token, &klass, generic_context);
7714 mono_class_init (klass);
7715 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7716 FIELD_ACCESS_FAILURE;
7719 * We can only support shared generic static
7720 * field access on architectures where the
7721 * trampoline code has been extended to handle
7722 * the generic class init.
7724 #ifndef MONO_ARCH_VTABLE_REG
7725 GENERIC_SHARING_FAILURE (*ip);
7728 if (cfg->generic_sharing_context)
7729 context_used = mono_class_check_context_used (klass);
7731 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7733 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7734 * to be called here.
7736 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7737 mono_class_vtable (cfg->domain, klass);
7738 CHECK_TYPELOAD (klass);
/* Look up thread/context-static fields registered for this domain. */
7740 mono_domain_lock (cfg->domain);
7741 if (cfg->domain->special_static_fields)
7742 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7743 mono_domain_unlock (cfg->domain);
7745 is_special_static = mono_class_field_is_special_static (field);
7747 /* Generate IR to compute the field address */
/* Shared/AOT/special-static: resolve the address via a runtime icall. */
7749 if ((cfg->opt & MONO_OPT_SHARED) ||
7750 (cfg->compile_aot && is_special_static) ||
7751 (context_used && is_special_static)) {
7752 MonoInst *iargs [2];
7754 g_assert (field->parent);
7755 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7757 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7758 field, MONO_RGCTX_INFO_CLASS_FIELD);
7760 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7762 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Shared-generic path: compute static_data + field->offset at run time. */
7763 } else if (context_used) {
7764 MonoInst *static_data;
7767 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7768 method->klass->name_space, method->klass->name, method->name,
7769 depth, field->offset);
7772 if (mono_class_needs_cctor_run (klass, method)) {
7776 vtable = emit_get_rgctx_klass (cfg, context_used,
7777 klass, MONO_RGCTX_INFO_VTABLE);
7779 // FIXME: This doesn't work since it tries to pass the argument
7780 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7782 * The vtable pointer is always passed in a register regardless of
7783 * the calling convention, so assign it manually, and make a call
7784 * using a signature without parameters.
7786 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7787 #ifdef MONO_ARCH_VTABLE_REG
7788 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7789 cfg->uses_vtable_reg = TRUE;
7796 * The pointer we're computing here is
7798 * super_info.static_data + field->offset
7800 static_data = emit_get_rgctx_klass (cfg, context_used,
7801 klass, MONO_RGCTX_INFO_STATIC_DATA);
7803 if (field->offset == 0) {
7806 int addr_reg = mono_alloc_preg (cfg);
7807 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7809 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7810 MonoInst *iargs [2];
7812 g_assert (field->parent);
7813 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7814 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7815 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Non-shared path: the address is a JIT-time constant. */
7817 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7819 CHECK_TYPELOAD (klass);
7821 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7822 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7823 if (cfg->verbose_level > 2)
7824 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7825 class_inits = g_slist_prepend (class_inits, vtable);
7827 if (cfg->run_cctors) {
7829 /* This makes so that inline cannot trigger */
7830 /* .cctors: too many apps depend on them */
7831 /* running with a specific order... */
7832 if (! vtable->initialized)
7834 ex = mono_runtime_class_init_full (vtable, FALSE);
7836 set_exception_object (cfg, ex);
7837 goto exception_exit;
7841 addr = (char*)vtable->data + field->offset;
7843 if (cfg->compile_aot)
7844 EMIT_NEW_SFLDACONST (cfg, ins, field);
7846 EMIT_NEW_PCONST (cfg, ins, addr);
7849 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7850 * This could be later optimized to do just a couple of
7851 * memory dereferences with constant offsets.
7853 MonoInst *iargs [1];
7854 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7855 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7859 /* Generate IR to do the actual load/store operation */
7861 if (*ip == CEE_LDSFLDA) {
7862 ins->klass = mono_class_from_mono_type (field->type);
7864 } else if (*ip == CEE_STSFLD) {
7869 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7870 store->flags |= ins_flag;
/* ldsfld: fold initialized readonly fields into constants when safe. */
7872 gboolean is_const = FALSE;
7873 MonoVTable *vtable = NULL;
7875 if (!context_used) {
7876 vtable = mono_class_vtable (cfg->domain, klass);
7877 CHECK_TYPELOAD (klass);
7879 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7880 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7881 gpointer addr = (char*)vtable->data + field->offset;
7882 int ro_type = field->type->type;
7883 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7884 ro_type = field->type->data.klass->enum_basetype->type;
7886 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
/* Switch on the (enum-unwrapped) element type to emit the constant. */
7889 case MONO_TYPE_BOOLEAN:
7891 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7895 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7898 case MONO_TYPE_CHAR:
7900 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7904 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7909 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7913 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7918 case MONO_TYPE_STRING:
7919 case MONO_TYPE_OBJECT:
7920 case MONO_TYPE_CLASS:
7921 case MONO_TYPE_SZARRAY:
7923 case MONO_TYPE_FNPTR:
7924 case MONO_TYPE_ARRAY:
7925 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7926 type_to_eval_stack_type ((cfg), field->type, *sp);
7931 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7936 case MONO_TYPE_VALUETYPE:
/* Not foldable: emit a normal load through the computed address. */
7946 CHECK_STACK_OVF (1);
7948 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7949 load->flags |= ins_flag;
/* CEE_STOBJ: store a value of the given type through an address. */
7962 token = read32 (ip + 1);
7963 klass = mini_get_class (method, token, generic_context);
7964 CHECK_TYPELOAD (klass);
7965 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7966 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
/*
 * CEE_NEWARR: allocate a one-dimensional array, plus the intrinsic that
 * copies static initializer data into it.  (Elided fragment.)
 */
7977 const char *data_ptr;
7984 token = read32 (ip + 1);
7986 klass = mini_get_class (method, token, generic_context);
7987 CHECK_TYPELOAD (klass);
7989 if (cfg->generic_sharing_context)
7990 context_used = mono_class_check_context_used (klass);
/* Shared-generic: fetch the array vtable through the RGCTX and icall. */
7995 /* FIXME: Decompose later to help abcrem */
7998 args [0] = emit_get_rgctx_klass (cfg, context_used,
7999 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8004 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8006 if (cfg->opt & MONO_OPT_SHARED) {
8007 /* Decompose now to avoid problems with references to the domainvar */
8008 MonoInst *iargs [3];
8010 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8011 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8014 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8016 /* Decompose later since it is needed by abcrem */
8017 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8018 ins->dreg = alloc_preg (cfg);
8019 ins->sreg1 = sp [0]->dreg;
8020 ins->inst_newa_class = klass;
8021 ins->type = STACK_OBJ;
8023 MONO_ADD_INS (cfg->cbb, ins);
8024 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8025 cfg->cbb->has_array_access = TRUE;
8027 /* Needed so mono_emit_load_get_addr () gets called */
8028 mono_get_got_var (cfg);
8038 * we inline/optimize the initialization sequence if possible.
8039 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8040 * for small sizes open code the memcpy
8041 * ensure the rva field is big enough
/* If the next IL is InitializeArray with a constant length, replace the
 * runtime call with a memcpy from the RVA data blob. */
8043 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
8044 MonoMethod *memcpy_method = get_memcpy_method ();
8045 MonoInst *iargs [3];
8046 int add_reg = alloc_preg (cfg);
8048 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8049 if (cfg->compile_aot) {
8050 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
8052 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8054 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8055 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* CEE_LDLEN: push the length of the array on top of the stack. */
8064 if (sp [0]->type != STACK_OBJ)
8067 dreg = alloc_preg (cfg);
8068 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8069 ins->dreg = alloc_preg (cfg);
8070 ins->sreg1 = sp [0]->dreg;
8071 ins->type = STACK_I4;
8072 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the cfg/bblock so array-bounds-check removal considers this block. */
8073 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8074 cfg->cbb->has_array_access = TRUE;
/* CEE_LDELEMA: push the address of an array element. */
8082 if (sp [0]->type != STACK_OBJ)
8085 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8087 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8088 CHECK_TYPELOAD (klass);
8089 /* we need to make sure that this array is exactly the type it needs
8090 * to be for correctness. the wrappers are lax with their usage
8091 * so we need to ignore them here
8093 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8094 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8097 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
/*
 * CEE_LDELEM_* family: load an array element.  ldelem.any carries an
 * explicit type token; the typed variants derive the class from the opcode.
 */
8101 case CEE_LDELEM_ANY:
8112 case CEE_LDELEM_REF: {
8118 if (*ip == CEE_LDELEM_ANY) {
8120 token = read32 (ip + 1);
8121 klass = mini_get_class (method, token, generic_context);
8122 CHECK_TYPELOAD (klass);
8123 mono_class_init (klass);
8126 klass = array_access_to_klass (*ip);
8128 if (sp [0]->type != STACK_OBJ)
8131 cfg->flags |= MONO_CFG_HAS_LDELEMA;
/* Constant index: fold the element offset and bounds-check inline. */
8133 if (sp [1]->opcode == OP_ICONST) {
8134 int array_reg = sp [0]->dreg;
8135 int index_reg = sp [1]->dreg;
8136 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8138 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8139 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
/* Variable index: compute the element address, then load through it. */
8141 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8142 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8145 if (*ip == CEE_LDELEM_ANY)
/*
 * CEE_STELEM_* family: store an array element.  Reference-type stores
 * need a covariance check, done in the stelemref marshal helper.
 */
8158 case CEE_STELEM_REF:
8159 case CEE_STELEM_ANY: {
8165 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8167 if (*ip == CEE_STELEM_ANY) {
8169 token = read32 (ip + 1);
8170 klass = mini_get_class (method, token, generic_context);
8171 CHECK_TYPELOAD (klass);
8172 mono_class_init (klass);
8175 klass = array_access_to_klass (*ip);
8177 if (sp [0]->type != STACK_OBJ)
8180 /* storing a NULL doesn't need any of the complex checks in stelemref */
8181 if (generic_class_is_reference_type (cfg, klass) &&
8182 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8183 MonoMethod* helper = mono_marshal_get_stelemref ();
8184 MonoInst *iargs [3];
8186 if (sp [0]->type != STACK_OBJ)
8188 if (sp [2]->type != STACK_OBJ)
8195 mono_emit_method_call (cfg, helper, iargs, NULL);
/* Valuetype or NULL stores: same constant/variable index split as ldelem. */
8197 if (sp [1]->opcode == OP_ICONST) {
8198 int array_reg = sp [0]->dreg;
8199 int index_reg = sp [1]->dreg;
8200 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8202 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8203 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8205 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8206 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8210 if (*ip == CEE_STELEM_ANY)
8217 case CEE_CKFINITE: {
/* ckfinite: throw ArithmeticException if the float on the stack is NaN/Inf. */
8221 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8222 ins->sreg1 = sp [0]->dreg;
8223 ins->dreg = alloc_freg (cfg);
8224 ins->type = STACK_R8;
8225 MONO_ADD_INS (bblock, ins);
/* Lower OP_CKFINITE to arch-level opcodes immediately. */
8228 mono_decompose_opcode (cfg, ins);
8233 case CEE_REFANYVAL: {
/*
 * refanyval: extract the value address from a TypedReference after
 * verifying that its klass matches the token's class.
 */
8234 MonoInst *src_var, *src;
8236 int klass_reg = alloc_preg (cfg);
8237 int dreg = alloc_preg (cfg);
8240 MONO_INST_NEW (cfg, ins, *ip);
8243 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8244 CHECK_TYPELOAD (klass);
8245 mono_class_init (klass);
8247 if (cfg->generic_sharing_context)
8248 context_used = mono_class_check_context_used (klass);
/* Get the address of the TypedReference currently on the stack. */
8251 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8253 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8254 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
/* Shared-generic: compare against the RGCTX-resolved klass at run time. */
8258 MonoInst *klass_ins;
8260 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8261 klass, MONO_RGCTX_INFO_KLASS);
8264 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8265 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8267 mini_emit_class_check (cfg, klass_reg, klass);
8269 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8270 ins->type = STACK_MP;
8275 case CEE_MKREFANY: {
/*
 * mkrefany: build a TypedReference {klass, type, value} in a stack
 * local from the address on the stack and the type token.
 */
8276 MonoInst *loc, *addr;
8279 MONO_INST_NEW (cfg, ins, *ip);
8282 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8283 CHECK_TYPELOAD (klass);
8284 mono_class_init (klass);
8286 if (cfg->generic_sharing_context)
8287 context_used = mono_class_check_context_used (klass);
8289 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8290 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
/* Shared-generic: klass comes from the RGCTX; derive type from it. */
8293 MonoInst *const_ins;
8294 int type_reg = alloc_preg (cfg);
8296 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8297 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8298 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8300 } else if (cfg->compile_aot) {
/* AOT: patchable class constant; type is computed by register add. */
8301 int const_reg = alloc_preg (cfg);
8302 int type_reg = alloc_preg (cfg);
8304 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8306 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
/* JIT: klass and type pointers are immediates. */
8309 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8310 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8312 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8314 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8315 ins->type = STACK_VTYPE;
8316 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN: push a RuntimeType/Method/FieldHandle for the token.
 * Also recognizes the "ldtoken; call GetTypeFromHandle" pattern and
 * folds it into a direct System.Type constant.  (Elided fragment.)
 */
8323 MonoClass *handle_class;
8325 CHECK_STACK_OVF (1);
8328 n = read32 (ip + 1);
/* Dynamic-method wrappers embed the handle and its class directly. */
8330 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8331 handle = mono_method_get_wrapper_data (method, n);
8332 handle_class = mono_method_get_wrapper_data (method, n + 1);
8333 if (handle_class == mono_defaults.typehandle_class)
8334 handle = &((MonoClass*)handle)->byval_arg;
8337 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8341 mono_class_init (handle_class);
/* Work out whether the handle depends on the generic context. */
8342 if (cfg->generic_sharing_context) {
8343 if (handle_class == mono_defaults.typehandle_class) {
8344 /* If we get a MONO_TYPE_CLASS
8345 then we need to provide the
8347 instantiation of it. */
8348 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8351 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8352 } else if (handle_class == mono_defaults.fieldhandle_class)
8353 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8354 else if (handle_class == mono_defaults.methodhandle_class)
8355 context_used = mono_method_check_context_used (handle);
8357 g_assert_not_reached ();
/* Domain-shared code: resolve the token via a runtime icall. */
8360 if (cfg->opt & MONO_OPT_SHARED) {
8361 MonoInst *addr, *vtvar, *iargs [3];
8362 int method_context_used;
8364 if (cfg->generic_sharing_context)
8365 method_context_used = mono_method_check_context_used (method);
8367 method_context_used = 0;
8369 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8371 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8372 EMIT_NEW_ICONST (cfg, iargs [1], n);
8373 if (method_context_used) {
8374 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8375 method, MONO_RGCTX_INFO_METHOD);
8376 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8378 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8379 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
/* Spill the handle into the valuetype local and reload it typed. */
8381 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8383 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8385 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* Peephole: ldtoken immediately followed by Type::GetTypeFromHandle. */
8387 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8388 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8389 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8390 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8391 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8392 MonoClass *tclass = mono_class_from_mono_type (handle);
8394 mono_class_init (tclass);
8396 ins = emit_get_rgctx_klass (cfg, context_used,
8397 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8398 } else if (cfg->compile_aot) {
8400 * FIXME: We would have to include the context into the
8401 * aot constant too (tests/generic-array-type.2.exe
8404 if (generic_context)
8405 cfg->disable_aot = TRUE;
8406 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8408 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8410 ins->type = STACK_OBJ;
8411 ins->klass = cmethod->klass;
/* Plain ldtoken: build the handle valuetype on the stack. */
8414 MonoInst *addr, *vtvar;
8416 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
/* Shared-generic: fetch the handle payload through the RGCTX. */
8419 if (handle_class == mono_defaults.typehandle_class) {
8420 ins = emit_get_rgctx_klass (cfg, context_used,
8421 mono_class_from_mono_type (handle),
8422 MONO_RGCTX_INFO_TYPE);
8423 } else if (handle_class == mono_defaults.methodhandle_class) {
8424 ins = emit_get_rgctx_method (cfg, context_used,
8425 handle, MONO_RGCTX_INFO_METHOD);
8426 } else if (handle_class == mono_defaults.fieldhandle_class) {
8427 ins = emit_get_rgctx_field (cfg, context_used,
8428 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8430 g_assert_not_reached ();
8432 } else if (cfg->compile_aot) {
8433 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8435 EMIT_NEW_PCONST (cfg, ins, handle);
8437 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8438 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8439 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8449 MONO_INST_NEW (cfg, ins, OP_THROW);
8451 ins->sreg1 = sp [0]->dreg;
8453 bblock->out_of_line = TRUE;
8454 MONO_ADD_INS (bblock, ins);
8455 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8456 MONO_ADD_INS (bblock, ins);
8459 link_bblock (cfg, bblock, end_bblock);
8460 start_new_bblock = 1;
8462 case CEE_ENDFINALLY:
8463 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8464 MONO_ADD_INS (bblock, ins);
8466 start_new_bblock = 1;
8469 * Control will leave the method so empty the stack, otherwise
8470 * the next basic block will start with a nonempty stack.
8472 while (sp != stack_start) {
8480 if (*ip == CEE_LEAVE) {
8482 target = ip + 5 + (gint32)read32(ip + 1);
8485 target = ip + 2 + (signed char)(ip [1]);
8488 /* empty the stack */
8489 while (sp != stack_start) {
8494 * If this leave statement is in a catch block, check for a
8495 * pending exception, and rethrow it if necessary.
8497 for (i = 0; i < header->num_clauses; ++i) {
8498 MonoExceptionClause *clause = &header->clauses [i];
8501 * Use <= in the final comparison to handle clauses with multiple
8502 * leave statements, like in bug #78024.
8503 * The ordering of the exception clauses guarantees that we find the
8506 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8508 MonoBasicBlock *dont_throw;
8513 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8516 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8518 NEW_BBLOCK (cfg, dont_throw);
8521 * Currently, we allways rethrow the abort exception, despite the
8522 * fact that this is not correct. See thread6.cs for an example.
8523 * But propagating the abort exception is more important than
8524 * getting the sematics right.
8526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8528 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8530 MONO_START_BB (cfg, dont_throw);
8535 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8537 for (tmp = handlers; tmp; tmp = tmp->next) {
8539 link_bblock (cfg, bblock, tblock);
8540 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8541 ins->inst_target_bb = tblock;
8542 MONO_ADD_INS (bblock, ins);
8544 g_list_free (handlers);
8547 MONO_INST_NEW (cfg, ins, OP_BR);
8548 MONO_ADD_INS (bblock, ins);
8549 GET_BBLOCK (cfg, tblock, target);
8550 link_bblock (cfg, bblock, tblock);
8551 ins->inst_target_bb = tblock;
8552 start_new_bblock = 1;
8554 if (*ip == CEE_LEAVE)
8563 * Mono specific opcodes
8565 case MONO_CUSTOM_PREFIX: {
8567 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8571 case CEE_MONO_ICALL: {
8573 MonoJitICallInfo *info;
8575 token = read32 (ip + 2);
8576 func = mono_method_get_wrapper_data (method, token);
8577 info = mono_find_jit_icall_by_addr (func);
8580 CHECK_STACK (info->sig->param_count);
8581 sp -= info->sig->param_count;
8583 ins = mono_emit_jit_icall (cfg, info->func, sp);
8584 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8588 inline_costs += 10 * num_calls++;
8592 case CEE_MONO_LDPTR: {
8595 CHECK_STACK_OVF (1);
8597 token = read32 (ip + 2);
8599 ptr = mono_method_get_wrapper_data (method, token);
8600 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8601 MonoJitICallInfo *callinfo;
8602 const char *icall_name;
8604 icall_name = method->name + strlen ("__icall_wrapper_");
8605 g_assert (icall_name);
8606 callinfo = mono_find_jit_icall_by_name (icall_name);
8607 g_assert (callinfo);
8609 if (ptr == callinfo->func) {
8610 /* Will be transformed into an AOTCONST later */
8611 EMIT_NEW_PCONST (cfg, ins, ptr);
8617 /* FIXME: Generalize this */
8618 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8619 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8624 EMIT_NEW_PCONST (cfg, ins, ptr);
8627 inline_costs += 10 * num_calls++;
8628 /* Can't embed random pointers into AOT code */
8629 cfg->disable_aot = 1;
8632 case CEE_MONO_ICALL_ADDR: {
8633 MonoMethod *cmethod;
8636 CHECK_STACK_OVF (1);
8638 token = read32 (ip + 2);
8640 cmethod = mono_method_get_wrapper_data (method, token);
8642 if (cfg->compile_aot) {
8643 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8645 ptr = mono_lookup_internal_call (cmethod);
8647 EMIT_NEW_PCONST (cfg, ins, ptr);
8653 case CEE_MONO_VTADDR: {
8654 MonoInst *src_var, *src;
8660 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8661 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8666 case CEE_MONO_NEWOBJ: {
8667 MonoInst *iargs [2];
8669 CHECK_STACK_OVF (1);
8671 token = read32 (ip + 2);
8672 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8673 mono_class_init (klass);
8674 NEW_DOMAINCONST (cfg, iargs [0]);
8675 MONO_ADD_INS (cfg->cbb, iargs [0]);
8676 NEW_CLASSCONST (cfg, iargs [1], klass);
8677 MONO_ADD_INS (cfg->cbb, iargs [1]);
8678 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8680 inline_costs += 10 * num_calls++;
8683 case CEE_MONO_OBJADDR:
8686 MONO_INST_NEW (cfg, ins, OP_MOVE);
8687 ins->dreg = alloc_preg (cfg);
8688 ins->sreg1 = sp [0]->dreg;
8689 ins->type = STACK_MP;
8690 MONO_ADD_INS (cfg->cbb, ins);
8694 case CEE_MONO_LDNATIVEOBJ:
8696 * Similar to LDOBJ, but instead load the unmanaged
8697 * representation of the vtype to the stack.
8702 token = read32 (ip + 2);
8703 klass = mono_method_get_wrapper_data (method, token);
8704 g_assert (klass->valuetype);
8705 mono_class_init (klass);
8708 MonoInst *src, *dest, *temp;
8711 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8712 temp->backend.is_pinvoke = 1;
8713 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8714 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8716 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8717 dest->type = STACK_VTYPE;
8718 dest->klass = klass;
8724 case CEE_MONO_RETOBJ: {
8726 * Same as RET, but return the native representation of a vtype
8729 g_assert (cfg->ret);
8730 g_assert (mono_method_signature (method)->pinvoke);
8735 token = read32 (ip + 2);
8736 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8738 if (!cfg->vret_addr) {
8739 g_assert (cfg->ret_var_is_local);
8741 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8743 EMIT_NEW_RETLOADA (cfg, ins);
8745 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8747 if (sp != stack_start)
8750 MONO_INST_NEW (cfg, ins, OP_BR);
8751 ins->inst_target_bb = end_bblock;
8752 MONO_ADD_INS (bblock, ins);
8753 link_bblock (cfg, bblock, end_bblock);
8754 start_new_bblock = 1;
8758 case CEE_MONO_CISINST:
8759 case CEE_MONO_CCASTCLASS: {
8764 token = read32 (ip + 2);
8765 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8766 if (ip [1] == CEE_MONO_CISINST)
8767 ins = handle_cisinst (cfg, klass, sp [0]);
8769 ins = handle_ccastclass (cfg, klass, sp [0]);
8775 case CEE_MONO_SAVE_LMF:
8776 case CEE_MONO_RESTORE_LMF:
8777 #ifdef MONO_ARCH_HAVE_LMF_OPS
8778 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8779 MONO_ADD_INS (bblock, ins);
8780 cfg->need_lmf_area = TRUE;
8784 case CEE_MONO_CLASSCONST:
8785 CHECK_STACK_OVF (1);
8787 token = read32 (ip + 2);
8788 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8791 inline_costs += 10 * num_calls++;
8793 case CEE_MONO_NOT_TAKEN:
8794 bblock->out_of_line = TRUE;
8798 CHECK_STACK_OVF (1);
8800 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8801 ins->dreg = alloc_preg (cfg);
8802 ins->inst_offset = (gint32)read32 (ip + 2);
8803 ins->type = STACK_PTR;
8804 MONO_ADD_INS (bblock, ins);
8809 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8819 /* somewhat similar to LDTOKEN */
8820 MonoInst *addr, *vtvar;
8821 CHECK_STACK_OVF (1);
8822 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8824 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8825 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8827 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8828 ins->type = STACK_VTYPE;
8829 ins->klass = mono_defaults.argumenthandle_class;
8842 * The following transforms:
8843 * CEE_CEQ into OP_CEQ
8844 * CEE_CGT into OP_CGT
8845 * CEE_CGT_UN into OP_CGT_UN
8846 * CEE_CLT into OP_CLT
8847 * CEE_CLT_UN into OP_CLT_UN
8849 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8851 MONO_INST_NEW (cfg, ins, cmp->opcode);
8853 cmp->sreg1 = sp [0]->dreg;
8854 cmp->sreg2 = sp [1]->dreg;
8855 type_from_op (cmp, sp [0], sp [1]);
8857 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8858 cmp->opcode = OP_LCOMPARE;
8859 else if (sp [0]->type == STACK_R8)
8860 cmp->opcode = OP_FCOMPARE;
8862 cmp->opcode = OP_ICOMPARE;
8863 MONO_ADD_INS (bblock, cmp);
8864 ins->type = STACK_I4;
8865 ins->dreg = alloc_dreg (cfg, ins->type);
8866 type_from_op (ins, sp [0], sp [1]);
8868 if (cmp->opcode == OP_FCOMPARE) {
8870 * The backends expect the fceq opcodes to do the
8873 cmp->opcode = OP_NOP;
8874 ins->sreg1 = cmp->sreg1;
8875 ins->sreg2 = cmp->sreg2;
8877 MONO_ADD_INS (bblock, ins);
8884 MonoMethod *cil_method, *ctor_method;
8885 gboolean needs_static_rgctx_invoke;
8887 CHECK_STACK_OVF (1);
8889 n = read32 (ip + 2);
8890 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8893 mono_class_init (cmethod->klass);
8895 mono_save_token_info (cfg, image, n, cmethod);
8897 if (cfg->generic_sharing_context)
8898 context_used = mono_method_check_context_used (cmethod);
8900 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8902 cil_method = cmethod;
8903 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8904 METHOD_ACCESS_FAILURE;
8906 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8907 if (check_linkdemand (cfg, method, cmethod))
8909 CHECK_CFG_EXCEPTION;
8910 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8911 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8915 * Optimize the common case of ldftn+delegate creation
8917 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8918 /* FIXME: SGEN support */
8919 /* FIXME: handle shared static generic methods */
8920 /* FIXME: handle this in shared code */
8921 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8922 MonoInst *target_ins;
8925 if (cfg->verbose_level > 3)
8926 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8927 target_ins = sp [-1];
8929 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8937 if (needs_static_rgctx_invoke)
8938 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8940 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8941 } else if (needs_static_rgctx_invoke) {
8942 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8944 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8946 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8950 inline_costs += 10 * num_calls++;
8953 case CEE_LDVIRTFTN: {
8958 n = read32 (ip + 2);
8959 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8962 mono_class_init (cmethod->klass);
8964 if (cfg->generic_sharing_context)
8965 context_used = mono_method_check_context_used (cmethod);
8967 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8968 if (check_linkdemand (cfg, method, cmethod))
8970 CHECK_CFG_EXCEPTION;
8971 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8972 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8979 args [1] = emit_get_rgctx_method (cfg, context_used,
8980 cmethod, MONO_RGCTX_INFO_METHOD);
8981 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8983 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8984 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8988 inline_costs += 10 * num_calls++;
8992 CHECK_STACK_OVF (1);
8994 n = read16 (ip + 2);
8996 EMIT_NEW_ARGLOAD (cfg, ins, n);
9001 CHECK_STACK_OVF (1);
9003 n = read16 (ip + 2);
9005 NEW_ARGLOADA (cfg, ins, n);
9006 MONO_ADD_INS (cfg->cbb, ins);
9014 n = read16 (ip + 2);
9016 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9018 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9022 CHECK_STACK_OVF (1);
9024 n = read16 (ip + 2);
9026 EMIT_NEW_LOCLOAD (cfg, ins, n);
9031 unsigned char *tmp_ip;
9032 CHECK_STACK_OVF (1);
9034 n = read16 (ip + 2);
9037 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9043 EMIT_NEW_LOCLOADA (cfg, ins, n);
9052 n = read16 (ip + 2);
9054 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9056 emit_stloc_ir (cfg, sp, header, n);
9063 if (sp != stack_start)
9065 if (cfg->method != method)
9067 * Inlining this into a loop in a parent could lead to
9068 * stack overflows which is different behavior than the
9069 * non-inlined case, thus disable inlining in this case.
9071 goto inline_failure;
9073 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9074 ins->dreg = alloc_preg (cfg);
9075 ins->sreg1 = sp [0]->dreg;
9076 ins->type = STACK_PTR;
9077 MONO_ADD_INS (cfg->cbb, ins);
9079 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9080 if (header->init_locals)
9081 ins->flags |= MONO_INST_INIT;
9086 case CEE_ENDFILTER: {
9087 MonoExceptionClause *clause, *nearest;
9088 int cc, nearest_num;
9092 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9094 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9095 ins->sreg1 = (*sp)->dreg;
9096 MONO_ADD_INS (bblock, ins);
9097 start_new_bblock = 1;
9102 for (cc = 0; cc < header->num_clauses; ++cc) {
9103 clause = &header->clauses [cc];
9104 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9105 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9106 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9112 if ((ip - header->code) != nearest->handler_offset)
9117 case CEE_UNALIGNED_:
9118 ins_flag |= MONO_INST_UNALIGNED;
9119 /* FIXME: record alignment? we can assume 1 for now */
9124 ins_flag |= MONO_INST_VOLATILE;
9128 ins_flag |= MONO_INST_TAILCALL;
9129 cfg->flags |= MONO_CFG_HAS_TAIL;
9130 /* Can't inline tail calls at this time */
9131 inline_costs += 100000;
9138 token = read32 (ip + 2);
9139 klass = mini_get_class (method, token, generic_context);
9140 CHECK_TYPELOAD (klass);
9141 if (generic_class_is_reference_type (cfg, klass))
9142 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9144 mini_emit_initobj (cfg, *sp, NULL, klass);
9148 case CEE_CONSTRAINED_:
9150 token = read32 (ip + 2);
9151 constrained_call = mono_class_get_full (image, token, generic_context);
9152 CHECK_TYPELOAD (constrained_call);
9157 MonoInst *iargs [3];
9161 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9162 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9163 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9164 /* emit_memset only works when val == 0 */
9165 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9170 if (ip [1] == CEE_CPBLK) {
9171 MonoMethod *memcpy_method = get_memcpy_method ();
9172 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9174 MonoMethod *memset_method = get_memset_method ();
9175 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9185 ins_flag |= MONO_INST_NOTYPECHECK;
9187 ins_flag |= MONO_INST_NORANGECHECK;
9188 /* we ignore the no-nullcheck for now since we
9189 * really do it explicitly only when doing callvirt->call
9195 int handler_offset = -1;
9197 for (i = 0; i < header->num_clauses; ++i) {
9198 MonoExceptionClause *clause = &header->clauses [i];
9199 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9200 handler_offset = clause->handler_offset;
9205 bblock->flags |= BB_EXCEPTION_UNSAFE;
9207 g_assert (handler_offset != -1);
9209 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9210 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9211 ins->sreg1 = load->dreg;
9212 MONO_ADD_INS (bblock, ins);
9214 link_bblock (cfg, bblock, end_bblock);
9215 start_new_bblock = 1;
9223 CHECK_STACK_OVF (1);
9225 token = read32 (ip + 2);
9226 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9227 MonoType *type = mono_type_create_from_typespec (image, token);
9228 token = mono_type_size (type, &ialign);
9230 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9231 CHECK_TYPELOAD (klass);
9232 mono_class_init (klass);
9233 token = mono_class_value_size (klass, &align);
9235 EMIT_NEW_ICONST (cfg, ins, token);
9240 case CEE_REFANYTYPE: {
9241 MonoInst *src_var, *src;
9247 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9249 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9250 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9261 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9266 g_error ("opcode 0x%02x not handled", *ip);
9269 if (start_new_bblock != 1)
9272 bblock->cil_length = ip - bblock->cil_code;
9273 bblock->next_bb = end_bblock;
9275 if (cfg->method == method && cfg->domainvar) {
9277 MonoInst *get_domain;
9279 cfg->cbb = init_localsbb;
9281 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9282 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9285 get_domain->dreg = alloc_preg (cfg);
9286 MONO_ADD_INS (cfg->cbb, get_domain);
9288 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9289 MONO_ADD_INS (cfg->cbb, store);
9292 if (cfg->method == method && cfg->got_var)
9293 mono_emit_load_got_addr (cfg);
9295 if (header->init_locals) {
9298 cfg->cbb = init_localsbb;
9299 cfg->ip = header->code;
9300 for (i = 0; i < header->num_locals; ++i) {
9301 MonoType *ptype = header->locals [i];
9302 int t = ptype->type;
9303 dreg = cfg->locals [i]->dreg;
9305 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9306 t = ptype->data.klass->enum_basetype->type;
9308 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9309 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9310 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9311 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9312 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9313 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9314 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9315 ins->type = STACK_R8;
9316 ins->inst_p0 = (void*)&r8_0;
9317 ins->dreg = alloc_dreg (cfg, STACK_R8);
9318 MONO_ADD_INS (init_localsbb, ins);
9319 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9320 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9321 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9322 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9324 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9331 if (cfg->method == method) {
9333 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9334 bb->region = mono_find_block_region (cfg, bb->real_offset);
9336 mono_create_spvar_for_region (cfg, bb->region);
9337 if (cfg->verbose_level > 2)
9338 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9342 g_slist_free (class_inits);
9343 dont_inline = g_list_remove (dont_inline, method);
9345 if (inline_costs < 0) {
9348 /* Method is too large */
9349 mname = mono_method_full_name (method, TRUE);
9350 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9351 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9356 if ((cfg->verbose_level > 2) && (cfg->method == method))
9357 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9359 return inline_costs;
9362 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9363 g_slist_free (class_inits);
9364 dont_inline = g_list_remove (dont_inline, method);
9368 g_slist_free (class_inits);
9369 dont_inline = g_list_remove (dont_inline, method);
9373 g_slist_free (class_inits);
9374 dont_inline = g_list_remove (dont_inline, method);
9375 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9379 g_slist_free (class_inits);
9380 dont_inline = g_list_remove (dont_inline, method);
9381 set_exception_type_from_invalid_il (cfg, method, ip);
9386 store_membase_reg_to_store_membase_imm (int opcode)
9389 case OP_STORE_MEMBASE_REG:
9390 return OP_STORE_MEMBASE_IMM;
9391 case OP_STOREI1_MEMBASE_REG:
9392 return OP_STOREI1_MEMBASE_IMM;
9393 case OP_STOREI2_MEMBASE_REG:
9394 return OP_STOREI2_MEMBASE_IMM;
9395 case OP_STOREI4_MEMBASE_REG:
9396 return OP_STOREI4_MEMBASE_IMM;
9397 case OP_STOREI8_MEMBASE_REG:
9398 return OP_STOREI8_MEMBASE_IMM;
9400 g_assert_not_reached ();
9406 #endif /* DISABLE_JIT */
9409 mono_op_to_op_imm (int opcode)
9419 return OP_IDIV_UN_IMM;
9423 return OP_IREM_UN_IMM;
9437 return OP_ISHR_UN_IMM;
9454 return OP_LSHR_UN_IMM;
9457 return OP_COMPARE_IMM;
9459 return OP_ICOMPARE_IMM;
9461 return OP_LCOMPARE_IMM;
9463 case OP_STORE_MEMBASE_REG:
9464 return OP_STORE_MEMBASE_IMM;
9465 case OP_STOREI1_MEMBASE_REG:
9466 return OP_STOREI1_MEMBASE_IMM;
9467 case OP_STOREI2_MEMBASE_REG:
9468 return OP_STOREI2_MEMBASE_IMM;
9469 case OP_STOREI4_MEMBASE_REG:
9470 return OP_STOREI4_MEMBASE_IMM;
9472 #if defined(__i386__) || defined (__x86_64__)
9474 return OP_X86_PUSH_IMM;
9475 case OP_X86_COMPARE_MEMBASE_REG:
9476 return OP_X86_COMPARE_MEMBASE_IMM;
9478 #if defined(__x86_64__)
9479 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9480 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9482 case OP_VOIDCALL_REG:
9491 return OP_LOCALLOC_IMM;
9498 ldind_to_load_membase (int opcode)
9502 return OP_LOADI1_MEMBASE;
9504 return OP_LOADU1_MEMBASE;
9506 return OP_LOADI2_MEMBASE;
9508 return OP_LOADU2_MEMBASE;
9510 return OP_LOADI4_MEMBASE;
9512 return OP_LOADU4_MEMBASE;
9514 return OP_LOAD_MEMBASE;
9516 return OP_LOAD_MEMBASE;
9518 return OP_LOADI8_MEMBASE;
9520 return OP_LOADR4_MEMBASE;
9522 return OP_LOADR8_MEMBASE;
9524 g_assert_not_reached ();
9531 stind_to_store_membase (int opcode)
9535 return OP_STOREI1_MEMBASE_REG;
9537 return OP_STOREI2_MEMBASE_REG;
9539 return OP_STOREI4_MEMBASE_REG;
9542 return OP_STORE_MEMBASE_REG;
9544 return OP_STOREI8_MEMBASE_REG;
9546 return OP_STORER4_MEMBASE_REG;
9548 return OP_STORER8_MEMBASE_REG;
9550 g_assert_not_reached ();
9557 mono_load_membase_to_load_mem (int opcode)
9559 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9560 #if defined(__i386__) || defined(__x86_64__)
9562 case OP_LOAD_MEMBASE:
9564 case OP_LOADU1_MEMBASE:
9565 return OP_LOADU1_MEM;
9566 case OP_LOADU2_MEMBASE:
9567 return OP_LOADU2_MEM;
9568 case OP_LOADI4_MEMBASE:
9569 return OP_LOADI4_MEM;
9570 case OP_LOADU4_MEMBASE:
9571 return OP_LOADU4_MEM;
9572 #if SIZEOF_VOID_P == 8
9573 case OP_LOADI8_MEMBASE:
9574 return OP_LOADI8_MEM;
9583 op_to_op_dest_membase (int store_opcode, int opcode)
9585 #if defined(__i386__)
9586 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9591 return OP_X86_ADD_MEMBASE_REG;
9593 return OP_X86_SUB_MEMBASE_REG;
9595 return OP_X86_AND_MEMBASE_REG;
9597 return OP_X86_OR_MEMBASE_REG;
9599 return OP_X86_XOR_MEMBASE_REG;
9602 return OP_X86_ADD_MEMBASE_IMM;
9605 return OP_X86_SUB_MEMBASE_IMM;
9608 return OP_X86_AND_MEMBASE_IMM;
9611 return OP_X86_OR_MEMBASE_IMM;
9614 return OP_X86_XOR_MEMBASE_IMM;
9620 #if defined(__x86_64__)
9621 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9626 return OP_X86_ADD_MEMBASE_REG;
9628 return OP_X86_SUB_MEMBASE_REG;
9630 return OP_X86_AND_MEMBASE_REG;
9632 return OP_X86_OR_MEMBASE_REG;
9634 return OP_X86_XOR_MEMBASE_REG;
9636 return OP_X86_ADD_MEMBASE_IMM;
9638 return OP_X86_SUB_MEMBASE_IMM;
9640 return OP_X86_AND_MEMBASE_IMM;
9642 return OP_X86_OR_MEMBASE_IMM;
9644 return OP_X86_XOR_MEMBASE_IMM;
9646 return OP_AMD64_ADD_MEMBASE_REG;
9648 return OP_AMD64_SUB_MEMBASE_REG;
9650 return OP_AMD64_AND_MEMBASE_REG;
9652 return OP_AMD64_OR_MEMBASE_REG;
9654 return OP_AMD64_XOR_MEMBASE_REG;
9657 return OP_AMD64_ADD_MEMBASE_IMM;
9660 return OP_AMD64_SUB_MEMBASE_IMM;
9663 return OP_AMD64_AND_MEMBASE_IMM;
9666 return OP_AMD64_OR_MEMBASE_IMM;
9669 return OP_AMD64_XOR_MEMBASE_IMM;
9679 op_to_op_store_membase (int store_opcode, int opcode)
9681 #if defined(__i386__) || defined(__x86_64__)
9684 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9685 return OP_X86_SETEQ_MEMBASE;
9687 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9688 return OP_X86_SETNE_MEMBASE;
9696 op_to_op_src1_membase (int load_opcode, int opcode)
9699 /* FIXME: This has sign extension issues */
9701 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9702 return OP_X86_COMPARE_MEMBASE8_IMM;
9705 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9710 return OP_X86_PUSH_MEMBASE;
9711 case OP_COMPARE_IMM:
9712 case OP_ICOMPARE_IMM:
9713 return OP_X86_COMPARE_MEMBASE_IMM;
9716 return OP_X86_COMPARE_MEMBASE_REG;
9721 /* FIXME: This has sign extension issues */
9723 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9724 return OP_X86_COMPARE_MEMBASE8_IMM;
9729 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9730 return OP_X86_PUSH_MEMBASE;
9732 /* FIXME: This only works for 32 bit immediates
9733 case OP_COMPARE_IMM:
9734 case OP_LCOMPARE_IMM:
9735 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9736 return OP_AMD64_COMPARE_MEMBASE_IMM;
9738 case OP_ICOMPARE_IMM:
9739 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9740 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9744 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9745 return OP_AMD64_COMPARE_MEMBASE_REG;
9748 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9749 return OP_AMD64_ICOMPARE_MEMBASE_REG;
9758 op_to_op_src2_membase (int load_opcode, int opcode)
9761 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9767 return OP_X86_COMPARE_REG_MEMBASE;
9769 return OP_X86_ADD_REG_MEMBASE;
9771 return OP_X86_SUB_REG_MEMBASE;
9773 return OP_X86_AND_REG_MEMBASE;
9775 return OP_X86_OR_REG_MEMBASE;
9777 return OP_X86_XOR_REG_MEMBASE;
9784 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9785 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9789 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9790 return OP_AMD64_COMPARE_REG_MEMBASE;
9793 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9794 return OP_X86_ADD_REG_MEMBASE;
9796 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9797 return OP_X86_SUB_REG_MEMBASE;
9799 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9800 return OP_X86_AND_REG_MEMBASE;
9802 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9803 return OP_X86_OR_REG_MEMBASE;
9805 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9806 return OP_X86_XOR_REG_MEMBASE;
9808 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9809 return OP_AMD64_ADD_REG_MEMBASE;
9811 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9812 return OP_AMD64_SUB_REG_MEMBASE;
9814 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9815 return OP_AMD64_AND_REG_MEMBASE;
9817 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9818 return OP_AMD64_OR_REG_MEMBASE;
9820 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9821 return OP_AMD64_XOR_REG_MEMBASE;
9829 mono_op_to_op_imm_noemul (int opcode)
9832 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9837 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9845 return mono_op_to_op_imm (opcode);
9852 * mono_handle_global_vregs:
9854 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 * Two-phase pass over the whole CFG:
 *  (1) promote every vreg referenced from more than one bblock to a global
 *      variable (a MonoInst created with mono_compile_create_var_for_vreg),
 *  (2) demote variables that turn out to be used in only one bblock back to
 *      plain local vregs, then compact the varinfo/vars tables.
 * NOTE(review): this listing is elided (the embedded line numbers jump), so
 * the comments below describe only the code that is visible here.
 */
9858 mono_handle_global_vregs (MonoCompile *cfg)
/* One gint32 slot per vreg: 0 = not seen yet, (block_num + 1) = seen only in
 * that bblock so far, -1 = seen in more than one bblock ("0 is a valid block
 * num", hence the +1 bias). */
9864 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9866 #ifdef MONO_ARCH_SIMD_INTRINSICS
9867 if (cfg->uses_simd_intrinsics)
9868 mono_simd_simplify_indirection (cfg);
9871 /* Find local vregs used in more than one bb */
9872 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9873 MonoInst *ins = bb->code;
9874 int block_num = bb->block_num;
9876 if (cfg->verbose_level > 2)
9877 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9880 for (; ins; ins = ins->next) {
9881 const char *spec = INS_INFO (ins->opcode);
9882 int regtype, regindex;
9885 if (G_UNLIKELY (cfg->verbose_level > 2))
9886 mono_print_ins (ins);
/* This pass runs after decomposition: only low-level (non-CEE) opcodes are
 * expected here. */
9888 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit the instruction's dreg, sreg1 and sreg2 in turn. */
9890 for (regindex = 0; regindex < 3; regindex ++) {
9893 if (regindex == 0) {
9894 regtype = spec [MONO_INST_DEST];
9898 } else if (regindex == 1) {
9899 regtype = spec [MONO_INST_SRC1];
9904 regtype = spec [MONO_INST_SRC2];
9910 #if SIZEOF_VOID_P == 4
/* 32-bit only: long vregs are always made global. */
9911 if (regtype == 'l') {
9913 * Since some instructions reference the original long vreg,
9914 * and some reference the two component vregs, it is quite hard
9915 * to determine when it needs to be global. So be conservative.
9917 if (!get_vreg_to_inst (cfg, vreg)) {
9918 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9920 if (cfg->verbose_level > 2)
9921 printf ("LONG VREG R%d made global.\n", vreg);
9925 * Make the component vregs volatile since the optimizations can
9926 * get confused otherwise.
/* On 32 bit, a long vreg's two word halves live in vreg + 1 / vreg + 2. */
9928 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9929 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9933 g_assert (vreg != -1);
9935 prev_bb = vreg_to_bb [vreg];
9937 /* 0 is a valid block num */
9938 vreg_to_bb [vreg] = block_num + 1;
/* Seen before in a different bblock and not yet flagged global. */
9939 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are handled by the register allocator, skip them. */
9940 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9943 if (!get_vreg_to_inst (cfg, vreg)) {
9944 if (G_UNLIKELY (cfg->verbose_level > 2))
9945 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9949 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9952 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9955 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9958 g_assert_not_reached ();
9962 /* Flag as having been used in more than one bb */
9963 vreg_to_bb [vreg] = -1;
9969 /* If a variable is used in only one bblock, convert it into a local vreg */
9970 for (i = 0; i < cfg->num_varinfo; i++) {
9971 MonoInst *var = cfg->varinfo [i];
9972 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9974 switch (var->type) {
9980 #if SIZEOF_VOID_P == 8
9983 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9984 /* Enabling this screws up the fp stack on x86 */
9987 /* Arguments are implicitly global */
9988 /* Putting R4 vars into registers doesn't work currently */
9989 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
9991 * Make sure that the variable's liveness interval doesn't contain a call, since
9992 * that would cause the lvreg to be spilled, making the whole optimization
9995 /* This is too slow for JIT compilation */
9997 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9999 int def_index, call_index, ins_index;
10000 gboolean spilled = FALSE;
10005 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10006 const char *spec = INS_INFO (ins->opcode);
10008 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10009 def_index = ins_index;
/* FIXME(review): the second clause below is an exact duplicate of the
 * first (SRC1/sreg1 again); it was almost certainly meant to test
 * spec [MONO_INST_SRC2] && ins->sreg2 == var->dreg, so uses of the
 * variable as the second source operand are never detected by this
 * call-in-liveness-interval check. Cannot be fixed safely from this
 * elided listing — confirm against the full source. */
10011 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10012 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10013 if (call_index > def_index) {
10019 if (MONO_IS_CALL (ins))
10020 call_index = ins_index;
10030 if (G_UNLIKELY (cfg->verbose_level > 2))
10031 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg -> var mapping. */
10032 var->flags |= MONO_INST_IS_DEAD;
10033 cfg->vreg_to_inst [var->dreg] = NULL;
10040 * Compress the varinfo and vars tables so the liveness computation is faster and
10041 * takes up less space.
10044 for (i = 0; i < cfg->num_varinfo; ++i) {
10045 MonoInst *var = cfg->varinfo [i];
10046 if (pos < i && cfg->locals_start == i)
10047 cfg->locals_start = pos;
/* Keep only live entries, sliding them down to index 'pos'. */
10048 if (!(var->flags & MONO_INST_IS_DEAD)) {
10050 cfg->varinfo [pos] = cfg->varinfo [i];
10051 cfg->varinfo [pos]->inst_c0 = pos;
10052 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10053 cfg->vars [pos].idx = pos;
10054 #if SIZEOF_VOID_P == 4
10055 if (cfg->varinfo [pos]->type == STACK_I8) {
10056 /* Modify the two component vars too */
10059 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10060 var1->inst_c0 = pos;
10061 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10062 var1->inst_c0 = pos;
10069 cfg->num_varinfo = pos;
10070 if (cfg->locals_start > cfg->num_varinfo)
10071 cfg->locals_start = cfg->num_varinfo;
10075 * mono_spill_global_vars:
10077 * Generate spill code for variables which are not allocated to registers,
10078 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10079 * code is generated which could be optimized by the local optimization passes.
/* NOTE(review): this listing is elided (the embedded line numbers jump), so
 * the comments below describe only the code that is visible here. */
10082 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10084 MonoBasicBlock *bb;
10086 int orig_next_vreg;
10087 guint32 *vreg_to_lvreg;
10089 guint32 i, lvregs_len;
10090 gboolean dest_has_lvreg = FALSE;
10091 guint32 stacktypes [128];
10093 *need_local_opts = FALSE;
10095 memset (spec2, 0, sizeof (spec2));
10097 /* FIXME: Move this function to mini.c */
/* Map an ins-spec regtype character ('i'/'l'/'f'/'x') to the stack type
 * used when allocating a fresh lvreg for it. */
10098 stacktypes ['i'] = STACK_PTR;
10099 stacktypes ['l'] = STACK_I8;
10100 stacktypes ['f'] = STACK_R8;
10101 #ifdef MONO_ARCH_SIMD_INTRINSICS
10102 stacktypes ['x'] = STACK_VTYPE;
10105 #if SIZEOF_VOID_P == 4
10106 /* Create MonoInsts for longs */
10107 for (i = 0; i < cfg->num_varinfo; i++) {
10108 MonoInst *ins = cfg->varinfo [i];
10110 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10111 switch (ins->type) {
10112 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two word-halves (dreg + 1 / dreg + 2) their own REGOFFSET
 * slots inside the parent long's stack slot. */
10118 g_assert (ins->opcode == OP_REGOFFSET);
10120 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10122 tree->opcode = OP_REGOFFSET;
10123 tree->inst_basereg = ins->inst_basereg;
10124 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10126 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10128 tree->opcode = OP_REGOFFSET;
10129 tree->inst_basereg = ins->inst_basereg;
10130 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10140 /* FIXME: widening and truncation */
10143 * As an optimization, when a variable allocated to the stack is first loaded into
10144 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10145 * the variable again.
10147 orig_next_vreg = cfg->next_vreg;
/* vreg_to_lvreg [vreg] caches the lvreg currently holding that variable
 * (0 = none); lvregs lists the cached entries so they can be cleared
 * cheaply at bblock boundaries and after calls. */
10148 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10149 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10152 /* Add spill loads/stores */
10153 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10156 if (cfg->verbose_level > 2)
10157 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10159 /* Clear vreg_to_lvreg array */
10160 for (i = 0; i < lvregs_len; i++)
10161 vreg_to_lvreg [lvregs [i]] = 0;
10165 MONO_BB_FOR_EACH_INS (bb, ins) {
10166 const char *spec = INS_INFO (ins->opcode);
10167 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10168 gboolean store, no_lvreg;
10170 if (G_UNLIKELY (cfg->verbose_level > 2))
10171 mono_print_ins (ins);
10173 if (ins->opcode == OP_NOP)
10177 * We handle LDADDR here as well, since it can only be decomposed
10178 * when variable addresses are known.
10180 if (ins->opcode == OP_LDADDR) {
10181 MonoInst *var = ins->inst_p0;
10183 if (var->opcode == OP_VTARG_ADDR) {
10184 /* Happens on SPARC/S390 where vtypes are passed by reference */
10185 MonoInst *vtaddr = var->inst_left;
10186 if (vtaddr->opcode == OP_REGVAR) {
10187 ins->opcode = OP_MOVE;
10188 ins->sreg1 = vtaddr->dreg;
10190 else if (var->inst_left->opcode == OP_REGOFFSET) {
10191 ins->opcode = OP_LOAD_MEMBASE;
10192 ins->inst_basereg = vtaddr->inst_basereg;
10193 ins->inst_offset = vtaddr->inst_offset;
10197 g_assert (var->opcode == OP_REGOFFSET);
/* Materialize the variable's address: basereg + offset. */
10199 ins->opcode = OP_ADD_IMM;
10200 ins->sreg1 = var->inst_basereg;
10201 ins->inst_imm = var->inst_offset;
10204 *need_local_opts = TRUE;
10205 spec = INS_INFO (ins->opcode);
/* All CIL-level opcodes must have been lowered by now. */
10208 if (ins->opcode < MONO_CEE_LAST) {
10209 mono_print_ins (ins);
10210 g_assert_not_reached ();
10214 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg <-> sreg2 and build spec2 so the store's base
 * register is processed as a source, not a destination. The swap is
 * undone further down before moving to the next instruction. */
10218 if (MONO_IS_STORE_MEMBASE (ins)) {
10219 tmp_reg = ins->dreg;
10220 ins->dreg = ins->sreg2;
10221 ins->sreg2 = tmp_reg;
10224 spec2 [MONO_INST_DEST] = ' ';
10225 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10226 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10228 } else if (MONO_IS_STORE_MEMINDEX (ins))
10229 g_assert_not_reached ();
10234 if (G_UNLIKELY (cfg->verbose_level > 2))
10235 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* --- DREG: if the destination is a global variable, either rename it to
 * the allocated hreg or spill the result to the variable's stack slot. --- */
10240 regtype = spec [MONO_INST_DEST];
10241 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10244 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10245 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10246 MonoInst *store_ins;
10249 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10251 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just rename. */
10252 ins->dreg = var->dreg;
10253 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10255 * Instead of emitting a load+store, use a _membase opcode.
10257 g_assert (var->opcode == OP_REGOFFSET);
10258 if (ins->opcode == OP_MOVE) {
10261 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10262 ins->inst_basereg = var->inst_basereg;
10263 ins->inst_offset = var->inst_offset;
10266 spec = INS_INFO (ins->opcode);
10270 g_assert (var->opcode == OP_REGOFFSET);
10272 prev_dreg = ins->dreg;
10274 /* Invalidate any previous lvreg for this vreg */
10275 vreg_to_lvreg [ins->dreg] = 0;
10279 #ifdef MONO_ARCH_SOFT_FLOAT
10280 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10282 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg; a store back to the
 * variable's slot is emitted right after the instruction. */
10286 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10288 if (regtype == 'l') {
10289 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10290 mono_bblock_insert_after_ins (bb, ins, store_ins);
10291 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10292 mono_bblock_insert_after_ins (bb, ins, store_ins);
10295 g_assert (store_opcode != OP_STOREV_MEMBASE);
10297 /* Try to fuse the store into the instruction itself */
10298 /* FIXME: Add more instructions */
10299 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10300 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10301 ins->inst_imm = ins->inst_c0;
10302 ins->inst_destbasereg = var->inst_basereg;
10303 ins->inst_offset = var->inst_offset;
10304 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A plain move becomes the store itself. */
10305 ins->opcode = store_opcode;
10306 ins->inst_destbasereg = var->inst_basereg;
10307 ins->inst_offset = var->inst_offset;
10311 tmp_reg = ins->dreg;
10312 ins->dreg = ins->sreg2;
10313 ins->sreg2 = tmp_reg;
10316 spec2 [MONO_INST_DEST] = ' ';
10317 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10318 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10320 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10321 // FIXME: The backends expect the base reg to be in inst_basereg
10322 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10324 ins->inst_basereg = var->inst_basereg;
10325 ins->inst_offset = var->inst_offset;
10326 spec = INS_INFO (ins->opcode);
10328 /* printf ("INS: "); mono_print_ins (ins); */
10329 /* Create a store instruction */
10330 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10332 /* Insert it after the instruction */
10333 mono_bblock_insert_after_ins (bb, ins, store_ins);
10336 * We can't assign ins->dreg to var->dreg here, since the
10337 * sregs could use it. So set a flag, and do it after
10340 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10341 dest_has_lvreg = TRUE;
/* --- SREGS: for each of the two source operands, rename to a hreg, reuse
 * a cached lvreg, fuse the load into the instruction, or emit an explicit
 * load before the instruction. --- */
10350 for (srcindex = 0; srcindex < 2; ++srcindex) {
10351 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10352 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10354 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10355 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10356 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10357 MonoInst *load_ins;
10358 guint32 load_opcode;
10360 if (var->opcode == OP_REGVAR) {
10362 ins->sreg1 = var->dreg;
10364 ins->sreg2 = var->dreg;
10368 g_assert (var->opcode == OP_REGOFFSET);
10370 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10372 g_assert (load_opcode != OP_LOADV_MEMBASE);
10374 if (vreg_to_lvreg [sreg]) {
10375 /* The variable is already loaded to an lvreg */
10376 if (G_UNLIKELY (cfg->verbose_level > 2))
10377 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10379 ins->sreg1 = vreg_to_lvreg [sreg];
10381 ins->sreg2 = vreg_to_lvreg [sreg];
10385 /* Try to fuse the load into the instruction */
10386 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10387 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10388 ins->inst_basereg = var->inst_basereg;
10389 ins->inst_offset = var->inst_offset;
10390 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10391 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10392 ins->sreg2 = var->inst_basereg;
10393 ins->inst_offset = var->inst_offset;
10395 if (MONO_IS_REAL_MOVE (ins)) {
/* The move collapses into the load itself; drop the move. */
10396 ins->opcode = OP_NOP;
10399 //printf ("%d ", srcindex); mono_print_ins (ins);
10401 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10403 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10404 if (var->dreg == prev_dreg) {
10406 * sreg refers to the value loaded by the load
10407 * emitted below, but we need to use ins->dreg
10408 * since it refers to the store emitted earlier.
/* Remember the lvreg so later uses in this bblock skip the reload. */
10412 vreg_to_lvreg [var->dreg] = sreg;
10413 g_assert (lvregs_len < 1024);
10414 lvregs [lvregs_len ++] = var->dreg;
/* Emit the explicit load(s) before the instruction; longs load the MS
 * word (sreg + 2) then the LS word (sreg + 1). */
10423 if (regtype == 'l') {
10424 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10425 mono_bblock_insert_before_ins (bb, ins, load_ins);
10426 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10427 mono_bblock_insert_before_ins (bb, ins, load_ins);
10430 #if SIZEOF_VOID_P == 4
10431 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10433 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10434 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg handling above: now that the sregs are done it
 * is safe to record the dreg's lvreg in the cache. */
10440 if (dest_has_lvreg) {
10441 vreg_to_lvreg [prev_dreg] = ins->dreg;
10442 g_assert (lvregs_len < 1024);
10443 lvregs [lvregs_len ++] = prev_dreg;
10444 dest_has_lvreg = FALSE;
/* Undo the earlier store-membase dreg <-> sreg2 swap. */
10448 tmp_reg = ins->dreg;
10449 ins->dreg = ins->sreg2;
10450 ins->sreg2 = tmp_reg;
/* Calls can clobber any lvreg, so the whole cache must be dropped. */
10453 if (MONO_IS_CALL (ins)) {
10454 /* Clear vreg_to_lvreg array */
10455 for (i = 0; i < lvregs_len; i++)
10456 vreg_to_lvreg [lvregs [i]] = 0;
10460 if (cfg->verbose_level > 2)
10461 mono_print_ins_index (1, ins);
10468 * - use 'iadd' instead of 'int_add'
10469 * - handling ovf opcodes: decompose in method_to_ir.
10470 * - unify iregs/fregs
10471 * -> partly done, the missing parts are:
10472 * - a more complete unification would involve unifying the hregs as well, so
10473 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10474 * would no longer map to the machine hregs, so the code generators would need to
10475 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10476 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10477 * fp/non-fp branches speeds it up by about 15%.
10478 * - use sext/zext opcodes instead of shifts
10480 * - get rid of TEMPLOADs if possible and use vregs instead
10481 * - clean up usage of OP_P/OP_ opcodes
10482 * - cleanup usage of DUMMY_USE
10483 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10485 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10486 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10487 * - make sure handle_stack_args () is called before the branch is emitted
10488 * - when the new IR is done, get rid of all unused stuff
10489 * - COMPARE/BEQ as separate instructions or unify them ?
10490 * - keeping them separate allows specialized compare instructions like
10491 * compare_imm, compare_membase
10492 * - most back ends unify fp compare+branch, fp compare+ceq
10493 * - integrate mono_save_args into inline_method
10494 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10495 * - handle long shift opts on 32 bit platforms somehow: they require
10496 * 3 sregs (2 for arg1 and 1 for arg2)
10497 * - make byref a 'normal' type.
10498 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10499 * variable if needed.
10500 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10501 * like inline_method.
10502 * - remove inlining restrictions
10503 * - fix LNEG and enable cfold of INEG
10504 * - generalize x86 optimizations like ldelema as a peephole optimization
10505 * - add store_mem_imm for amd64
10506 * - optimize the loading of the interruption flag in the managed->native wrappers
10507 * - avoid special handling of OP_NOP in passes
10508 * - move code inserting instructions into one function/macro.
10509 * - try a coalescing phase after liveness analysis
10510 * - add float -> vreg conversion + local optimizations on !x86
10511 * - figure out how to handle decomposed branches during optimizations, ie.
10512 * compare+branch, op_jump_table+op_br etc.
10513 * - promote RuntimeXHandles to vregs
10514 * - vtype cleanups:
10515 * - add a NEW_VARLOADA_VREG macro
10516 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10517 * accessing vtype fields.
10518 * - get rid of I8CONST on 64 bit platforms
10519 * - dealing with the increase in code size due to branches created during opcode
10521 * - use extended basic blocks
10522 * - all parts of the JIT
10523 * - handle_global_vregs () && local regalloc
10524 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10525 * - sources of increase in code size:
10528 * - isinst and castclass
10529 * - lvregs not allocated to global registers even if used multiple times
10530 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10532 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10533 * - add all micro optimizations from the old JIT
10534 * - put tree optimizations into the deadce pass
10535 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10536 * specific function.
10537 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10538 * fcompare + branchCC.
10539 * - create a helper function for allocating a stack slot, taking into account
10540 * MONO_CFG_HAS_SPILLUP.
10541 * - merge new GC changes in mini.c.
10543 * - merge the ia64 switch changes.
10544 * - merge the mips conditional changes.
10545 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10546 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10547 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10548 * - optimize mono_regstate2_alloc_int/float.
10549 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10550 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10551 * parts of the tree could be separated by other instructions, killing the tree
10552 * arguments, or stores killing loads etc. Also, should we fold loads into other
10553 * instructions if the result of the load is used multiple times ?
10554 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10555 * - LAST MERGE: 108395.
10556 * - when returning vtypes in registers, generate IR and append it to the end of the
10557 * last bb instead of doing it in the epilog.
10558 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10559 * ones in inssel.h.
10560 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10568 - When to decompose opcodes:
10569 - earlier: this makes some optimizations hard to implement, since the low level IR
10570 no longer contains the necessary information. But it is easier to do.
10571 - later: harder to implement, enables more optimizations.
10572 - Branches inside bblocks:
10573 - created when decomposing complex opcodes.
10574 - branches to another bblock: harmless, but not tracked by the branch
10575 optimizations, so need to branch to a label at the start of the bblock.
10576 - branches to inside the same bblock: very problematic, trips up the local
10577 reg allocator. Can be fixed by splitting the current bblock, but that is a
10578 complex operation, since some local vregs can become global vregs etc.
10579 - Local/global vregs:
10580 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10581 local register allocator.
10582 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10583 structure, created by mono_create_var (). Assigned to hregs or the stack by
10584 the global register allocator.
10585 - When to do optimizations like alu->alu_imm:
10586 - earlier -> saves work later on since the IR will be smaller/simpler
10587 - later -> can work on more instructions
10588 - Handling of valuetypes:
10589 - When a vtype is pushed on the stack, a new temporary is created, an
10590 instruction computing its address (LDADDR) is emitted and pushed on
10591 the stack. Need to optimize cases when the vtype is used immediately as in
10592 argument passing, stloc etc.
10593 - Instead of the to_end stuff in the old JIT, simply call the function handling
10594 the values on the stack before emitting the last instruction of the bb.
10597 #endif /* DISABLE_JIT */