2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
59 #define BRANCH_COST 100
60 #define INLINE_LENGTH_LIMIT 20
61 #define INLINE_FAILURE do {\
62 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
65 #define CHECK_CFG_EXCEPTION do {\
66 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
69 #define METHOD_ACCESS_FAILURE do { \
70 char *method_fname = mono_method_full_name (method, TRUE); \
71 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
72 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
73 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
74 g_free (method_fname); \
75 g_free (cil_method_fname); \
76 goto exception_exit; \
78 #define FIELD_ACCESS_FAILURE do { \
79 char *method_fname = mono_method_full_name (method, TRUE); \
80 char *field_fname = mono_field_full_name (field); \
81 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
82 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
83 g_free (method_fname); \
84 g_free (field_fname); \
85 goto exception_exit; \
87 #define GENERIC_SHARING_FAILURE(opcode) do { \
88 if (cfg->generic_sharing_context) { \
89 if (cfg->verbose_level > 2) \
90 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
91 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
92 goto exception_exit; \
96 /* Determine whether 'ins' represents a load of the 'this' argument */
97 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
99 static int ldind_to_load_membase (int opcode);
100 static int stind_to_store_membase (int opcode);
102 int mono_op_to_op_imm (int opcode);
103 int mono_op_to_op_imm_noemul (int opcode);
105 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
106 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
107 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
109 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
110 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
111 guint inline_offset, gboolean is_virtual_call);
113 /* helper methods signature */
114 extern MonoMethodSignature *helper_sig_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_domain_get;
116 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
117 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 * Instruction metadata
125 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
131 #if SIZEOF_VOID_P == 8
136 /* keep in sync with the enum in mini.h */
139 #include "mini-ops.h"
143 extern GHashTable *jit_icall_name_hash;
145 #define MONO_INIT_VARINFO(vi,id) do { \
146 (vi)->range.first_use.pos.bid = 0xffff; \
/* Allocate a fresh virtual integer register in @cfg (thin wrapper over alloc_ireg). */
152 mono_alloc_ireg (MonoCompile *cfg)
154 return alloc_ireg (cfg);
/* Allocate a fresh virtual floating-point register in @cfg (thin wrapper over alloc_freg). */
158 mono_alloc_freg (MonoCompile *cfg)
160 return alloc_freg (cfg);
/* Allocate a fresh virtual pointer-sized register in @cfg (thin wrapper over alloc_preg). */
164 mono_alloc_preg (MonoCompile *cfg)
166 return alloc_preg (cfg);
/* Allocate a destination register suitable for values of @stack_type (wrapper over alloc_dreg). */
170 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
172 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *   Dispatch on the machine representation of @type; enums and generic
 *   instances are unwrapped to their underlying type before dispatching.
 *   Presumably returns the OP_*MOVE opcode used to move a value of that
 *   type between registers — confirm against callers.
 */
176 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
182 switch (type->type) {
185 case MONO_TYPE_BOOLEAN:
197 case MONO_TYPE_FNPTR:
199 case MONO_TYPE_CLASS:
200 case MONO_TYPE_STRING:
201 case MONO_TYPE_OBJECT:
202 case MONO_TYPE_SZARRAY:
203 case MONO_TYPE_ARRAY:
207 #if SIZEOF_VOID_P == 8
/* enums are moved like their underlying integral type */
216 case MONO_TYPE_VALUETYPE:
217 if (type->data.klass->enumtype) {
218 type = type->data.klass->enum_basetype;
221 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
224 case MONO_TYPE_TYPEDBYREF:
/* a generic instance is treated like its container class */
226 case MONO_TYPE_GENERICINST:
227 type = &type->data.generic_class->container_class->byval_arg;
231 g_assert (cfg->generic_sharing_context);
234 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: dump @bb's predecessor list, successor list and
 *   instruction chain to stdout, prefixed with @msg.
 */
240 mono_print_bb (MonoBasicBlock *bb, const char *msg)
245 printf ("\n%s %d: [IN: ", msg, bb->block_num);
246 for (i = 0; i < bb->in_count; ++i)
247 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* successors */
249 for (i = 0; i < bb->out_count; ++i)
250 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* finally the instructions of the block, in program order */
252 for (tree = bb->code; tree; tree = tree->next)
253 mono_print_ins_index (-1, tree);
257 * Can't put this at the beginning, since other files reference stuff from this
262 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
264 #define GET_BBLOCK(cfg,tblock,ip) do { \
265 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
267 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
268 NEW_BBLOCK (cfg, (tblock)); \
269 (tblock)->cil_code = (ip); \
270 ADD_BBLOCK (cfg, (tblock)); \
274 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
275 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
276 int _length_reg = alloc_ireg (cfg); \
277 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
278 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
279 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
283 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
284 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
285 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
288 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
289 ins->sreg1 = array_reg; \
290 ins->sreg2 = index_reg; \
291 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
292 MONO_ADD_INS ((cfg)->cbb, ins); \
293 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
294 (cfg)->cbb->has_array_access = TRUE; \
298 #if defined(__i386__) || defined(__x86_64__)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_VOID_P == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 mono_decompose_opcode ((cfg), (ins)); \
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow passes to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* nothing to do if the edge is already recorded */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
/* grow from's successor array by one and copy the old entries */
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* mirror the edge on to's predecessor side */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
472 /* first search for handlers and filters */
473 for (i = 0; i < header->num_clauses; ++i) {
474 clause = &header->clauses [i];
/* filter code runs from filter_offset up to the start of its handler */
475 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
476 (offset < (clause->handler_offset)))
477 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* region token: (clause index + 1) in the upper bits, region kind | clause flags in the low byte */
479 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
480 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
481 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
482 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
483 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
485 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
489 /* search the try blocks */
490 for (i = 0; i < header->num_clauses; ++i) {
491 clause = &header->clauses [i];
492 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
493 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect the handler basic blocks of clauses with kind @type whose
 *   protected region contains @ip but not @target — i.e. the handlers
 *   that are left by a branch from @ip to @target.
 */
500 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
502 MonoMethod *method = cfg->method;
503 MonoMethodHeader *header = mono_method_get_header (method);
504 MonoExceptionClause *clause;
505 MonoBasicBlock *handler;
509 for (i = 0; i < header->num_clauses; ++i) {
510 clause = &header->clauses [i];
/* the branch leaves this clause: ip is inside it, target is not */
511 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
512 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
513 if (clause->flags == type) {
514 handler = cfg->cil_offset_to_bb [clause->handler_offset];
516 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *   Return the per-region variable kept in cfg->spvars, creating it on
 *   first use and caching it under @region.
 */
524 mono_create_spvar_for_region (MonoCompile *cfg, int region)
528 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
532 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
533 /* prevent it from being register allocated */
534 var->flags |= MONO_INST_INDIRECT;
536 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable cached for the handler at @offset. */
540 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
542 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Return the object-typed variable associated with the handler at
 *   @offset, creating it on first use and caching it in cfg->exvars.
 */
546 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
550 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
554 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
555 /* prevent it from being register allocated */
556 var->flags |= MONO_INST_INDIRECT;
558 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
564 * Returns the type used in the eval stack when @type is loaded.
565 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
568 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
572 inst->klass = klass = mono_class_from_mono_type (type);
/* STACK_MP: managed pointer */
574 inst->type = STACK_MP;
579 switch (type->type) {
581 inst->type = STACK_INV;
585 case MONO_TYPE_BOOLEAN:
591 inst->type = STACK_I4;
596 case MONO_TYPE_FNPTR:
597 inst->type = STACK_PTR;
599 case MONO_TYPE_CLASS:
600 case MONO_TYPE_STRING:
601 case MONO_TYPE_OBJECT:
602 case MONO_TYPE_SZARRAY:
603 case MONO_TYPE_ARRAY:
604 inst->type = STACK_OBJ;
608 inst->type = STACK_I8;
612 inst->type = STACK_R8;
/* enums are loaded as their underlying integral type */
614 case MONO_TYPE_VALUETYPE:
615 if (type->data.klass->enumtype) {
616 type = type->data.klass->enum_basetype;
620 inst->type = STACK_VTYPE;
623 case MONO_TYPE_TYPEDBYREF:
624 inst->klass = mono_defaults.typed_reference_class;
625 inst->type = STACK_VTYPE;
/* a generic instance behaves like its container class */
627 case MONO_TYPE_GENERICINST:
628 type = &type->data.generic_class->container_class->byval_arg;
631 case MONO_TYPE_MVAR :
632 /* FIXME: all the arguments must be references for now,
633 * later look inside cfg and see if the arg num is
636 g_assert (cfg->generic_sharing_context);
637 inst->type = STACK_OBJ;
640 g_error ("unknown type 0x%02x in eval stack type", type->type);
645 * The following tables are used to quickly validate the IL code in type_from_op ().
648 bin_num_table [STACK_MAX] [STACK_MAX] = {
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
652 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
653 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
654 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
655 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
661 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
664 /* reduce the size of this table */
666 bin_int_table [STACK_MAX] [STACK_MAX] = {
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
673 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
674 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
678 bin_comp_table [STACK_MAX] [STACK_MAX] = {
679 /* Inv i L p F & O vt */
681 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
682 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
683 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
684 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
685 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
686 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
687 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
690 /* reduce the size of this table */
692 shift_table [STACK_MAX] [STACK_MAX] = {
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
704 * Tables to map from the non-specific opcode to the matching
705 * type-specific opcode.
707 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
709 binops_op_map [STACK_MAX] = {
710 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
713 /* handles from CEE_NEG to CEE_CONV_U8 */
715 unops_op_map [STACK_MAX] = {
716 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
719 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
721 ovfops_op_map [STACK_MAX] = {
722 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
725 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
727 ovf2ops_op_map [STACK_MAX] = {
728 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
731 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
733 ovf3ops_op_map [STACK_MAX] = {
734 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
737 /* handles from CEE_BEQ to CEE_BLT_UN */
739 beqops_op_map [STACK_MAX] = {
740 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
743 /* handles from CEE_CEQ to CEE_CLT_UN */
745 ceqops_op_map [STACK_MAX] = {
746 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
750 * Sets ins->type (the type on the eval stack) according to the
751 * type of the opcode and the arguments to it.
752 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
754 * FIXME: this function sets ins->type unconditionally in some cases, but
755 * it should set it to invalid for some types (a conv.x on an object)
758 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
760 switch (ins->opcode) {
767 /* FIXME: check unverifiable args for STACK_MP */
768 ins->type = bin_num_table [src1->type] [src2->type];
769 ins->opcode += binops_op_map [ins->type];
776 ins->type = bin_int_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
782 ins->type = shift_table [src1->type] [src2->type];
783 ins->opcode += binops_op_map [ins->type];
788 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
789 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
790 ins->opcode = OP_LCOMPARE;
791 else if (src1->type == STACK_R8)
792 ins->opcode = OP_FCOMPARE;
794 ins->opcode = OP_ICOMPARE;
796 case OP_ICOMPARE_IMM:
797 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
798 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
799 ins->opcode = OP_LCOMPARE_IMM;
811 ins->opcode += beqops_op_map [src1->type];
814 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
821 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
822 ins->opcode += ceqops_op_map [src1->type];
826 ins->type = neg_table [src1->type];
827 ins->opcode += unops_op_map [ins->type];
830 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
831 ins->type = src1->type;
833 ins->type = STACK_INV;
834 ins->opcode += unops_op_map [ins->type];
840 ins->type = STACK_I4;
841 ins->opcode += unops_op_map [src1->type];
844 ins->type = STACK_R8;
845 switch (src1->type) {
848 ins->opcode = OP_ICONV_TO_R_UN;
851 ins->opcode = OP_LCONV_TO_R_UN;
855 case CEE_CONV_OVF_I1:
856 case CEE_CONV_OVF_U1:
857 case CEE_CONV_OVF_I2:
858 case CEE_CONV_OVF_U2:
859 case CEE_CONV_OVF_I4:
860 case CEE_CONV_OVF_U4:
861 ins->type = STACK_I4;
862 ins->opcode += ovf3ops_op_map [src1->type];
864 case CEE_CONV_OVF_I_UN:
865 case CEE_CONV_OVF_U_UN:
866 ins->type = STACK_PTR;
867 ins->opcode += ovf2ops_op_map [src1->type];
869 case CEE_CONV_OVF_I1_UN:
870 case CEE_CONV_OVF_I2_UN:
871 case CEE_CONV_OVF_I4_UN:
872 case CEE_CONV_OVF_U1_UN:
873 case CEE_CONV_OVF_U2_UN:
874 case CEE_CONV_OVF_U4_UN:
875 ins->type = STACK_I4;
876 ins->opcode += ovf2ops_op_map [src1->type];
879 ins->type = STACK_PTR;
880 switch (src1->type) {
882 ins->opcode = OP_MOVE;
886 #if SIZEOF_VOID_P == 8
887 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_MOVE;
893 ins->opcode = OP_LCONV_TO_U;
896 ins->opcode = OP_FCONV_TO_U;
902 ins->type = STACK_I8;
903 ins->opcode += unops_op_map [src1->type];
905 case CEE_CONV_OVF_I8:
906 case CEE_CONV_OVF_U8:
907 ins->type = STACK_I8;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_U8_UN:
911 case CEE_CONV_OVF_I8_UN:
912 ins->type = STACK_I8;
913 ins->opcode += ovf2ops_op_map [src1->type];
917 ins->type = STACK_R8;
918 ins->opcode += unops_op_map [src1->type];
921 ins->type = STACK_R8;
925 ins->type = STACK_I4;
926 ins->opcode += ovfops_op_map [src1->type];
931 ins->type = STACK_PTR;
932 ins->opcode += ovfops_op_map [src1->type];
940 ins->type = bin_num_table [src1->type] [src2->type];
941 ins->opcode += ovfops_op_map [src1->type];
942 if (ins->type == STACK_R8)
943 ins->type = STACK_INV;
945 case OP_LOAD_MEMBASE:
946 ins->type = STACK_PTR;
948 case OP_LOADI1_MEMBASE:
949 case OP_LOADU1_MEMBASE:
950 case OP_LOADI2_MEMBASE:
951 case OP_LOADU2_MEMBASE:
952 case OP_LOADI4_MEMBASE:
953 case OP_LOADU4_MEMBASE:
954 ins->type = STACK_PTR;
956 case OP_LOADI8_MEMBASE:
957 ins->type = STACK_I8;
959 case OP_LOADR4_MEMBASE:
960 case OP_LOADR8_MEMBASE:
961 ins->type = STACK_R8;
964 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
968 if (ins->type == STACK_MP)
969 ins->klass = mono_defaults.object_class;
974 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
980 param_table [STACK_MAX] [STACK_MAX] = {
985 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
989 switch (args->type) {
999 for (i = 0; i < sig->param_count; ++i) {
1000 switch (args [i].type) {
1004 if (!sig->params [i]->byref)
1008 if (sig->params [i]->byref)
1010 switch (sig->params [i]->type) {
1011 case MONO_TYPE_CLASS:
1012 case MONO_TYPE_STRING:
1013 case MONO_TYPE_OBJECT:
1014 case MONO_TYPE_SZARRAY:
1015 case MONO_TYPE_ARRAY:
1022 if (sig->params [i]->byref)
1024 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1033 /*if (!param_table [args [i].type] [sig->params [i]->type])
1041 * When we need a pointer to the current domain many times in a method, we
1042 * call mono_domain_get() once and we store the result in a local variable.
1043 * This function returns the variable that represents the MonoDomain*.
1045 inline static MonoInst *
1046 mono_get_domainvar (MonoCompile *cfg)
/* the MonoDomain* variable is created lazily and cached on the MonoCompile */
1048 if (!cfg->domainvar)
1049 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 return cfg->domainvar;
1054 * The got_var contains the address of the Global Offset Table when AOT
1057 inline static MonoInst *
1058 mono_get_got_var (MonoCompile *cfg)
1060 #ifdef MONO_ARCH_NEED_GOT_VAR
/* the GOT variable is only needed when compiling ahead-of-time */
1061 if (!cfg->compile_aot)
1063 if (!cfg->got_var) {
/* lazily create and cache the variable holding the GOT address */
1064 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1066 return cfg->got_var;
/* Return the lazily-created variable holding the runtime generic context (rgctx). */
1073 mono_get_vtable_var (MonoCompile *cfg)
/* only meaningful when compiling with generic sharing */
1075 g_assert (cfg->generic_sharing_context);
1077 if (!cfg->rgctx_var) {
1078 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1079 /* force the var to be stack allocated */
1080 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1083 return cfg->rgctx_var;
/* Map an eval-stack type (ins->type) back to the corresponding MonoType. */
1087 type_from_stack_type (MonoInst *ins) {
1088 switch (ins->type) {
1089 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1090 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1091 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1092 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed-pointer case: this_arg is the byref form of the pointee class */
1094 return &ins->klass->this_arg;
1095 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1096 case STACK_VTYPE: return &ins->klass->byval_arg;
1098 g_error ("stack type %d to monotype not handled\n", ins->type);
1103 static G_GNUC_UNUSED int
1104 type_to_stack_type (MonoType *t)
1106 switch (mono_type_get_underlying_type (t)->type) {
1109 case MONO_TYPE_BOOLEAN:
1112 case MONO_TYPE_CHAR:
1119 case MONO_TYPE_FNPTR:
1121 case MONO_TYPE_CLASS:
1122 case MONO_TYPE_STRING:
1123 case MONO_TYPE_OBJECT:
1124 case MONO_TYPE_SZARRAY:
1125 case MONO_TYPE_ARRAY:
1133 case MONO_TYPE_VALUETYPE:
1134 case MONO_TYPE_TYPEDBYREF:
1136 case MONO_TYPE_GENERICINST:
1137 if (mono_type_generic_inst_is_valuetype (t))
1143 g_assert_not_reached ();
/* Map a CIL array-access opcode (ldelem/stelem family) to the element class it operates on. */
1150 array_access_to_klass (int opcode)
1154 return mono_defaults.byte_class;
1156 return mono_defaults.uint16_class;
1159 return mono_defaults.int_class;
1162 return mono_defaults.sbyte_class;
1165 return mono_defaults.int16_class;
1168 return mono_defaults.int32_class;
1170 return mono_defaults.uint32_class;
1173 return mono_defaults.int64_class;
1176 return mono_defaults.single_class;
1179 return mono_defaults.double_class;
1180 case CEE_LDELEM_REF:
1181 case CEE_STELEM_REF:
1182 return mono_defaults.object_class;
1184 g_assert_not_reached ();
1190 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a variable used to carry the value at stack @slot across basic
 *   block boundaries; variables are shared per (slot, stack type) pair
 *   where possible.
 */
1193 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1198 /* inlining can result in deeper stacks */
1199 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1200 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* one shared entry per (stack slot, stack type) pair */
1202 pos = ins->type - 1 + slot * STACK_MAX;
1204 switch (ins->type) {
1211 if ((vnum = cfg->intvars [pos]))
1212 return cfg->varinfo [vnum];
/* first use of this slot/type combination: create and remember it */
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 cfg->intvars [pos] = res->inst_c0;
1217 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Remember the (image, token) pair that @key was loaded from, for AOT compilation. */
1223 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1226 * Don't use this if a generic_context is set, since that means AOT can't
1227 * look up the method using just the image+token.
1228 * table == 0 means this is a reference made from a wrapper.
1230 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1231 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1232 jump_info_token->image = image;
1233 jump_info_token->token = token;
1234 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1239 * This function is called to handle items that are left on the evaluation stack
1240 * at basic block boundaries. What happens is that we save the values to local variables
1241 * and we reload them later when first entering the target basic block (with the
1242 * handle_loaded_temps () function).
1243 * A single join point will use the same variables (stored in the array bb->out_stack or
1244 * bb->in_stack, if the basic block is before or after the join point).
1246 * This function needs to be called _before_ emitting the last instruction of
1247 * the bb (i.e. before emitting a branch).
1248 * If the stack merge fails at a join point, cfg->unverifiable is set.
1251 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1254 MonoBasicBlock *bb = cfg->cbb;
1255 MonoBasicBlock *outb;
1256 MonoInst *inst, **locals;
1261 if (cfg->verbose_level > 3)
1262 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first time we exit this bb: decide where the stack values will live */
1263 if (!bb->out_scount) {
1264 bb->out_scount = count;
1265 //printf ("bblock %d has out:", bb->block_num);
1267 for (i = 0; i < bb->out_count; ++i) {
1268 outb = bb->out_bb [i];
1269 /* exception handlers are linked, but they should not be considered for stack args */
1270 if (outb->flags & BB_EXCEPTION_HANDLER)
1272 //printf (" %d", outb->block_num);
1273 if (outb->in_stack) {
1275 bb->out_stack = outb->in_stack;
1281 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1282 for (i = 0; i < count; ++i) {
1284 * try to reuse temps already allocated for this purpose, if they occupy the same
1285 * stack slot and if they are of the same type.
1286 * This won't cause conflicts since if 'local' is used to
1287 * store one of the values in the in_stack of a bblock, then
1288 * the same variable will be used for the same outgoing stack
1290 * This doesn't work when inlining methods, since the bblocks
1291 * in the inlined methods do not inherit their in_stack from
1292 * the bblock they are inlined to. See bug #58863 for an
1295 if (cfg->inlined_method)
1296 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1298 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* record bb's out-stack as the expected in-stack of every successor */
1303 for (i = 0; i < bb->out_count; ++i) {
1304 outb = bb->out_bb [i];
1305 /* exception handlers are linked, but they should not be considered for stack args */
1306 if (outb->flags & BB_EXCEPTION_HANDLER)
1308 if (outb->in_scount) {
1309 if (outb->in_scount != bb->out_scount) {
1310 cfg->unverifiable = TRUE;
1313 continue; /* check they are the same locals */
1315 outb->in_scount = count;
1316 outb->in_stack = bb->out_stack;
1319 locals = bb->out_stack;
/* store each live stack value into its shared temporary */
1321 for (i = 0; i < count; ++i) {
1322 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1323 inst->cil_code = sp [i]->cil_code;
1324 sp [i] = locals [i];
1325 if (cfg->verbose_level > 3)
1326 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1330 * It is possible that the out bblocks already have in_stack assigned, and
1331 * the in_stacks differ. In this case, we will store to all the different
1338 /* Find a bblock which has a different in_stack */
1340 while (bindex < bb->out_count) {
1341 outb = bb->out_bb [bindex];
1342 /* exception handlers are linked, but they should not be considered for stack args */
1343 if (outb->flags & BB_EXCEPTION_HANDLER) {
1347 if (outb->in_stack != locals) {
1348 for (i = 0; i < count; ++i) {
1349 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1350 inst->cil_code = sp [i]->cil_code;
1351 sp [i] = locals [i];
1352 if (cfg->verbose_level > 3)
1353 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1355 locals = outb->in_stack;
1364 /* Emit code which loads interface_offsets [klass->interface_id]
1365 * The array is stored in memory before vtable.
/*
 * Loads the interface-offset table entry for @klass into @intf_reg, given a
 * vtable pointer in @vtable_reg.  Under AOT compilation the interface id is
 * not known until load time, so an "adjusted iid" AOT constant is added to
 * the vtable pointer and dereferenced; otherwise a fixed negative offset
 * before the vtable is loaded directly.
 * NOTE(review): this is a sampled listing — some original lines (braces,
 * else) are elided here.
 */
1368 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1370 if (cfg->compile_aot) {
1371 int ioffset_reg = alloc_preg (cfg);
1372 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID is resolved by the AOT loader; presumably already scaled
 * for direct pointer addition — TODO confirm against aot-runtime. */
1374 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1375 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: interface_id is a compile-time constant, so index the array
 * stored immediately before the vtable at a negative offset. */
1379 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1384 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1385 * stored in "klass_reg" implements the interface "klass".
/*
 * Tests the class's interface bitmap: byte index is iid >> 3, bit within
 * the byte is iid & 7.  Result in @intf_bit_reg is nonzero iff the bit
 * is set.  The AOT path computes the byte/bit at runtime from an IID
 * constant; the JIT path folds both into immediates.
 */
1388 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1390 int ibitmap_reg = alloc_preg (cfg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
/* Load the pointer to the class's interface_bitmap. */
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1395 if (cfg->compile_aot) {
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte address = bitmap + (iid >> 3) */
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: byte index and bit mask are compile-time constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1418 * stored in "vtable_reg" implements the interface "klass".
/*
 * Same bitmap test as mini_emit_load_intf_bit_reg_class, but reads the
 * interface_bitmap from a MonoVTable instead of a MonoClass.
 */
1421 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1423 int ibitmap_reg = alloc_preg (cfg);
1424 int ibitmap_byte_reg = alloc_preg (cfg);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1428 if (cfg->compile_aot) {
1429 int iid_reg = alloc_preg (cfg);
1430 int shifted_iid_reg = alloc_preg (cfg);
1431 int ibitmap_byte_address_reg = alloc_preg (cfg);
1432 int masked_iid_reg = alloc_preg (cfg);
1433 int iid_one_bit_reg = alloc_preg (cfg);
1434 int iid_bit_reg = alloc_preg (cfg);
1435 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte address = bitmap + (iid >> 3); bit = 1 << (iid & 7) */
1436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1440 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1441 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1442 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: constants folded at compile time. */
1444 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1450 * Emit code which checks whenever the interface id of @klass is smaller than
1451 * than the value given by max_iid_reg.
/*
 * Emits a range guard: if max_iid < klass's interface id, the object cannot
 * implement @klass.  On failure it either branches to @false_target or, when
 * no target is given, raises InvalidCastException — the guarding conditional
 * between the two emissions is elided from this sampled listing; presumably
 * `if (false_target) ... else ...` — TODO confirm against full source.
 */
1454 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1455 MonoBasicBlock *false_target)
1457 if (cfg->compile_aot) {
1458 int iid_reg = alloc_preg (cfg);
1459 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1460 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1465 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1467 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1470 /* Same as above, but obtains max_iid from a vtable */
1472 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1473 MonoBasicBlock *false_target)
1475 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field; load zero-extended then delegate. */
1477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1478 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1481 /* Same as above, but obtains max_iid from a klass */
1483 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1484 MonoBasicBlock *false_target)
1486 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, but reads MonoClass.max_interface_id. */
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1489 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an isinst-style subtype test using the supertypes table: compares
 * supertypes[klass->idepth - 1] of the candidate class against @klass and
 * branches to @true_target on a match.  When @klass is deeper than the
 * default supertable size, first verifies the candidate's idepth is large
 * enough (branching to @false_target otherwise).
 * NOTE(review): sampled listing — braces/else lines elided.
 */
1493 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1495 int idepth_reg = alloc_preg (cfg);
1496 int stypes_reg = alloc_preg (cfg);
1497 int stype = alloc_preg (cfg);
1499 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* idepth guard: shallower classes cannot be subtypes of @klass. */
1500 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes[klass->idepth - 1] and compare against @klass. */
1504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1505 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1506 if (cfg->compile_aot) {
1507 int const_reg = alloc_preg (cfg);
1508 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1509 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * Emits an interface cast check against a vtable: range-checks the iid,
 * then tests the interface bitmap bit.  A set bit branches to @true_target;
 * otherwise InvalidCastException is raised (the conditional selecting
 * branch vs. exception is elided in this sampled listing — presumably
 * keyed on true_target being non-NULL; TODO confirm).
 */
1517 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1519 int intf_reg = alloc_preg (cfg);
1521 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1522 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1525 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1527 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1531 * Variant of the above that takes a register to the class, not the vtable.
/*
 * Interface cast check starting from a MonoClass pointer rather than a
 * vtable: iid range check, then bitmap bit test; branch to @true_target
 * on success, otherwise raise InvalidCastException (selection between
 * branch and exception elided in this sampled listing).
 */
1534 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1536 int intf_bit_reg = alloc_preg (cfg);
1538 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1539 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1544 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact-class equality check: compares the class pointer in
 * @klass_reg against @klass and raises InvalidCastException if they differ.
 * Under AOT the class pointer is materialized via a CLASSCONST patch.
 */
1548 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1550 if (cfg->compile_aot) {
1551 int const_reg = alloc_preg (cfg);
1552 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1553 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1557 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * Like mini_emit_class_check, but instead of raising an exception it
 * branches to @target using the caller-supplied comparison @branch_op
 * (e.g. OP_PBEQ / OP_PBNE_UN) after comparing @klass_reg with @klass.
 */
1561 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1563 if (cfg->compile_aot) {
1564 int const_reg = alloc_preg (cfg);
1565 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1566 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * Emits a castclass check for array/non-array types.  For arrays it checks
 * rank, then recursively checks the element (cast_class) with special cases
 * for object/Enum/ValueType element types; for non-arrays it validates via
 * the supertypes table.  @obj_reg may be -1 to skip the vector (SZARRAY)
 * bounds check, used by the recursive call for arrays of arrays.
 * On mismatch the emitted code raises InvalidCastException; a null object
 * presumably branches to @object_is_null before reaching these checks —
 * elided lines in this sampled listing prevent confirming the exact
 * branch structure.
 */
1574 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1577 int rank_reg = alloc_preg (cfg);
1578 int eclass_reg = alloc_preg (cfg);
/* Array path: rank must match exactly. */
1580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1582 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1583 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1585 if (klass->cast_class == mono_defaults.object_class) {
/* object[] also accepts enum element types whose parent is Enum. */
1586 int parent_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1588 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1589 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1590 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1591 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1592 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1593 } else if (klass->cast_class == mono_defaults.enum_class) {
1594 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1595 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1596 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1598 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1599 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1602 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1603 /* Check that the object is a vector too */
1604 int bounds_reg = alloc_preg (cfg);
1605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1607 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: verify via the supertypes table, with an idepth guard
 * when @klass is deeper than the default supertable size. */
1610 int idepth_reg = alloc_preg (cfg);
1611 int stypes_reg = alloc_preg (cfg);
1612 int stype = alloc_preg (cfg);
1614 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1615 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1617 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1621 mini_emit_class_check (cfg, stype, klass);
/*
 * Emits inline code to zero @size bytes at @destreg + @offset.  Only
 * val == 0 is supported (asserted).  Small well-aligned sizes use a single
 * immediate store; otherwise the value is placed in a register and stored
 * in pointer-sized (then 4/2/1-byte) chunks.  On targets without unaligned
 * access the wide-store path is compiled out (#if !NO_UNALIGNED_ACCESS).
 * NOTE(review): sampled listing — switch/loop scaffolding lines elided.
 */
1626 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1630 g_assert (val == 0);
1635 if ((size <= 4) && (size <= align)) {
1638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1644 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1646 #if SIZEOF_VOID_P == 8
1648 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: load the (zero) value into a register of pointer width. */
1654 val_reg = alloc_preg (cfg);
1656 if (sizeof (gpointer) == 8)
1657 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1659 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1662 /* This could be optimized further if neccesary */
1664 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1671 #if !NO_UNALIGNED_ACCESS
1672 if (sizeof (gpointer) == 8) {
/* 64-bit: presumably aligns with a 4-byte store first, then 8-byte
 * stores — intervening loop/offset-advance lines elided here. */
1674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1679 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1697 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1703 #endif /* DISABLE_JIT */
/*
 * Emits an inline copy of @size bytes from @srcreg + @soffset to
 * @destreg + @doffset.  Unaligned copies use byte load/store pairs;
 * aligned copies use pointer-sized chunks (8-byte on 64-bit when
 * unaligned access is allowed), then a 4/2/1-byte tail.
 * NOTE(review): sampled listing — loop/offset-advance lines elided.
 */
1706 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: copy one byte at a time through a fresh register. */
1714 /* This could be optimized further if neccesary */
1716 cur_reg = alloc_preg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1718 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1725 #if !NO_UNALIGNED_ACCESS
1726 if (sizeof (gpointer) == 8) {
/* 64-bit wide copy in 8-byte chunks. */
1728 cur_reg = alloc_preg (cfg);
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-byte, then 2-byte, then single-byte copies. */
1739 cur_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1747 cur_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1755 cur_reg = alloc_preg (cfg);
1756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a return type to the matching call opcode family:
 * void -> VOIDCALL, reference/int -> CALL, i8 -> LCALL, r4/r8 -> FCALL,
 * valuetype/typedbyref -> VCALL; @calli selects the *_REG (indirect)
 * variant and @virt the *VIRT variant.  Enums are unwrapped to their
 * underlying type and generic instances to their container class, then
 * re-dispatched (the loop-back/goto lines are elided in this sampled
 * listing).  Byref types presumably take the early CALL return at 1770.
 */
1767 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1770 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type parameters to a concrete basic type. */
1773 type = mini_get_basic_type_from_generic (gsctx, type);
1774 switch (type->type) {
1775 case MONO_TYPE_VOID:
1776 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1779 case MONO_TYPE_BOOLEAN:
1782 case MONO_TYPE_CHAR:
1785 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 case MONO_TYPE_FNPTR:
1790 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1791 case MONO_TYPE_CLASS:
1792 case MONO_TYPE_STRING:
1793 case MONO_TYPE_OBJECT:
1794 case MONO_TYPE_SZARRAY:
1795 case MONO_TYPE_ARRAY:
1796 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1799 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1802 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1803 case MONO_TYPE_VALUETYPE:
1804 if (type->data.klass->enumtype) {
/* Unwrap enum to its underlying primitive and re-dispatch. */
1805 type = type->data.klass->enum_basetype;
1808 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1809 case MONO_TYPE_TYPEDBYREF:
1810 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1811 case MONO_TYPE_GENERICINST:
1812 type = &type->data.generic_class->container_class->byval_arg;
1815 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1821 * target_type_is_incompatible:
1822 * @cfg: MonoCompile context
/* @target: the declared type of the store destination
 * @arg: the evaluation-stack item being stored */
1824 * Check that the item @arg on the evaluation stack can be stored
1825 * in the target type (can be a local, or field, etc).
1826 * The cfg arg can be used to check if we need verification or just
/* ... validity checking (sampled listing: remainder of this sentence and
 * some return statements are elided). */
1829 * Returns: non-0 value if arg can't be stored on a target.
1832 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1834 MonoType *simple_type;
1837 if (target->byref) {
1838 /* FIXME: check that the pointed to types match */
1839 if (arg->type == STACK_MP)
1840 return arg->klass != mono_class_from_mono_type (target);
1841 if (arg->type == STACK_PTR)
/* Byref targets accept managed pointers (klass must match) and
 * unmanaged pointers; other stack types presumably fail. */
1846 simple_type = mono_type_get_underlying_type (target);
1847 switch (simple_type->type) {
1848 case MONO_TYPE_VOID:
1852 case MONO_TYPE_BOOLEAN:
1855 case MONO_TYPE_CHAR:
/* Small integer targets: stack item must be int32 or native int. */
1858 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1862 /* STACK_MP is needed when setting pinned locals */
1863 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1868 case MONO_TYPE_FNPTR:
1869 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1872 case MONO_TYPE_CLASS:
1873 case MONO_TYPE_STRING:
1874 case MONO_TYPE_OBJECT:
1875 case MONO_TYPE_SZARRAY:
1876 case MONO_TYPE_ARRAY:
1877 if (arg->type != STACK_OBJ)
1879 /* FIXME: check type compatibility */
1883 if (arg->type != STACK_I8)
1888 if (arg->type != STACK_R8)
1891 case MONO_TYPE_VALUETYPE:
1892 if (arg->type != STACK_VTYPE)
/* Value types additionally require an exact class match. */
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_TYPEDBYREF:
1899 if (arg->type != STACK_VTYPE)
1901 klass = mono_class_from_mono_type (simple_type);
1902 if (klass != arg->klass)
1905 case MONO_TYPE_GENERICINST:
1906 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1907 if (arg->type != STACK_VTYPE)
1909 klass = mono_class_from_mono_type (simple_type);
1910 if (klass != arg->klass)
1914 if (arg->type != STACK_OBJ)
1916 /* FIXME: check type compatibility */
1920 case MONO_TYPE_MVAR:
1921 /* FIXME: all the arguments must be references for now,
1922 * later look inside cfg and see if the arg num is
1923 * really a reference
/* Generic sharing: type params are treated as references here. */
1925 g_assert (cfg->generic_sharing_context);
1926 if (arg->type != STACK_OBJ)
1930 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1936 * Prepare arguments for passing to a function call.
1937 * Return a non-zero value if the arguments can't be passed to the given
/* ... signature (sampled listing: part of this sentence is elided). */
1939 * The type checks are not yet complete and some conversions may need
1940 * casts on 32 or 64 bit architectures.
1942 * FIXME: implement this using target_type_is_incompatible ()
1945 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1947 MonoType *simple_type;
/* The implicit 'this' argument must be a reference or pointer. */
1951 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1955 for (i = 0; i < sig->param_count; ++i) {
1956 if (sig->params [i]->byref) {
1957 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Resolve shared-generic params before dispatching on the type code. */
1961 simple_type = sig->params [i];
1962 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1964 switch (simple_type->type) {
1965 case MONO_TYPE_VOID:
1970 case MONO_TYPE_BOOLEAN:
1973 case MONO_TYPE_CHAR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1982 case MONO_TYPE_FNPTR:
1983 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1986 case MONO_TYPE_CLASS:
1987 case MONO_TYPE_STRING:
1988 case MONO_TYPE_OBJECT:
1989 case MONO_TYPE_SZARRAY:
1990 case MONO_TYPE_ARRAY:
1991 if (args [i]->type != STACK_OBJ)
1996 if (args [i]->type != STACK_I8)
2001 if (args [i]->type != STACK_R8)
2004 case MONO_TYPE_VALUETYPE:
2005 if (simple_type->data.klass->enumtype) {
/* Unwrap enums to their underlying type and re-check. */
2006 simple_type = simple_type->data.klass->enum_basetype;
2009 if (args [i]->type != STACK_VTYPE)
2012 case MONO_TYPE_TYPEDBYREF:
2013 if (args [i]->type != STACK_VTYPE)
2016 case MONO_TYPE_GENERICINST:
2017 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2021 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Converts a *CALLVIRT opcode to its direct-call counterpart; used when a
 * virtual call can be statically devirtualized.  Aborts on an opcode that
 * is not a callvirt variant (most case lines elided in this sampled
 * listing).
 */
2029 callvirt_to_call (int opcode)
2034 case OP_VOIDCALLVIRT:
2043 g_assert_not_reached ();
/*
 * Converts a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode,
 * i.e. an indirect call through a [basereg + offset] slot (vtable/IMT).
 * Aborts on opcodes that are not callvirt variants.
 */
2050 callvirt_to_call_membase (int opcode)
2054 return OP_CALL_MEMBASE;
2055 case OP_VOIDCALLVIRT:
2056 return OP_VOIDCALL_MEMBASE;
2058 return OP_FCALL_MEMBASE;
2060 return OP_LCALL_MEMBASE;
2062 return OP_VCALL_MEMBASE;
2064 g_assert_not_reached ();
2070 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Emits code to pass the IMT "method" argument for an interface call.
 * On architectures with a dedicated IMT register, the method identifier
 * (from @imt_arg, or the call's method as an AOT/pointer constant) is
 * moved into MONO_ARCH_IMT_REG; otherwise the arch back end handles it.
 */
2072 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2074 #ifdef MONO_ARCH_IMT_REG
2075 int method_reg = alloc_preg (cfg);
/* Guarding condition (presumably `if (imt_arg)`) elided in this listing. */
2078 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2079 } else if (cfg->compile_aot) {
2080 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2083 MONO_INST_NEW (cfg, ins, OP_PCONST);
2084 ins->inst_p0 = call->method;
2085 ins->dreg = method_reg;
2086 MONO_ADD_INS (cfg->cbb, ins);
/* Attach the value to the call as an out-argument in the IMT register. */
2089 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2091 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * Allocates a MonoJumpInfo (patch info) from @mp and initializes it with
 * the given native-code offset @ip, patch @type and @target data.
 * The mempool owns the returned object; nothing to free by the caller.
 */
2096 static MonoJumpInfo *
2097 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2099 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2103 ji->data.target = target;
2108 inline static MonoInst*
2109 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * Creates a MonoCallInst for a call with signature @sig and arguments
 * @args.  Picks the opcode from the return type (@calli for indirect,
 * @virtual for callvirt variants).  For struct returns a temp local is
 * created and its address exposed through OP_OUTARG_VTRETADDR so vtype
 * decomposition can rewrite it later.  On soft-float targets, R4
 * arguments are converted via an icall *before* the call sequence to
 * avoid clobbering argument registers.  Finishes by letting the arch
 * back end lay out the out-args and recording stack usage.
 */
2111 inline static MonoCallInst *
2112 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2113 MonoInst **args, int calli, int virtual)
2116 #ifdef MONO_ARCH_SOFT_FLOAT
2120 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2123 call->signature = sig;
/* Set the eval-stack type of the call result from the return type. */
2125 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2127 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2128 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2131 temp->backend.is_pinvoke = sig->pinvoke;
2134 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2135 * address of return value to increase optimization opportunities.
2136 * Before vtype decomposition, the dreg of the call ins itself represents the
2137 * fact the call modifies the return value. After decomposition, the call will
2138 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2139 * will be transformed into an LDADDR.
2141 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2142 loada->dreg = alloc_preg (cfg);
2143 loada->inst_p0 = temp;
2144 /* We reference the call too since call->dreg could change during optimization */
2145 loada->inst_p1 = call;
2146 MONO_ADD_INS (cfg->cbb, loada);
2148 call->inst.dreg = temp->dreg;
2150 call->vret_var = loada;
2151 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2152 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2154 #ifdef MONO_ARCH_SOFT_FLOAT
2156 * If the call has a float argument, we would need to do an r8->r4 conversion using
2157 * an icall, but that cannot be done during the call sequence since it would clobber
2158 * the call registers + the stack. So we do it before emitting the call.
2160 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2162 MonoInst *in = call->args [i];
2164 if (i >= sig->hasthis)
2165 t = sig->params [i - sig->hasthis];
2167 t = &mono_defaults.int_class->byval_arg;
2168 t = mono_type_get_underlying_type (t);
2170 if (!t->byref && t->type == MONO_TYPE_R4) {
2171 MonoInst *iargs [1];
/* Convert the r8 on the eval stack to an int-vreg r4 via icall. */
2175 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2177 /* The result will be in an int vreg */
2178 call->args [i] = conv;
/* Arch-specific: assign argument registers / stack slots. */
2183 mono_arch_emit_call (cfg, call);
2185 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2186 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * Emits an indirect call through the address in @addr with signature @sig.
 * The address becomes sreg1 of the *CALL_REG instruction.
 */
2191 inline static MonoInst*
2192 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2194 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2196 call->inst.sreg1 = addr->dreg;
2198 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2200 return (MonoInst*)call;
/*
 * Like mono_emit_calli, but additionally passes a runtime generic context
 * in the architecture's dedicated RGCTX register.  Only compiled on
 * architectures that define MONO_ARCH_RGCTX_REG; otherwise unreachable.
 */
2203 inline static MonoInst*
2204 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2206 #ifdef MONO_ARCH_RGCTX_REG
/* Move the rgctx value into a fresh vreg, then pin it to RGCTX_REG. */
2211 rgctx_reg = mono_alloc_preg (cfg);
2212 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2214 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2216 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2217 cfg->uses_rgctx_reg = TRUE;
2219 return (MonoInst*)call;
2221 g_assert_not_reached ();
/*
 * Emits a (possibly virtual) managed call to @method.  @this non-NULL
 * selects a virtual call.  Handles, in order: string-ctor signature
 * fixup, remoting wrappers for MarshalByRef receivers, devirtualization
 * of non-virtual/final methods (null-check + direct call), the delegate
 * Invoke fast path through invoke_impl, devirtualization of sealed
 * classes/methods, and finally a true vtable or IMT-slot indirect call.
 * @imt_arg carries the interface-method identifier for generic/interface
 * dispatch.  Returns the call instruction.
 * NOTE(review): sampled listing — several guard/brace lines are elided.
 */
2227 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2228 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2230 gboolean virtual = this != NULL;
2231 gboolean enable_for_aot = TRUE;
2234 if (method->string_ctor) {
2235 /* Create the real signature */
2236 /* FIXME: Cache these */
2237 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2238 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2243 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Transparent-proxy safety: route MarshalByRef calls through a
 * remoting-invoke-with-check wrapper unless 'this' is known local. */
2245 if (this && sig->hasthis &&
2246 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2247 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2248 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2250 call->method = method;
2252 call->inst.flags |= MONO_INST_HAS_METHOD;
2253 call->inst.inst_left = this;
2256 int vtable_reg, slot_reg, this_reg;
2258 this_reg = this->dreg;
2260 if ((!cfg->compile_aot || enable_for_aot) &&
2261 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2262 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2263 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2265 * the method is not virtual, we just need to ensure this is not null
2266 * and then we can call the method directly.
2268 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2269 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2272 if (!method->string_ctor) {
2273 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2274 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2275 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2278 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2280 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2282 return (MonoInst*)call;
2285 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2286 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2287 /* Make a call to delegate->invoke_impl */
2288 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2289 call->inst.inst_basereg = this_reg;
2290 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2291 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2293 return (MonoInst*)call;
2297 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2298 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2299 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2301 * the method is virtual, but we can statically dispatch since either
2302 * it's class or the method itself are sealed.
2303 * But first we need to ensure it's not a null reference.
2305 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2306 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2307 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2309 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2310 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2312 return (MonoInst*)call;
/* True virtual dispatch: indirect call through a vtable/IMT slot. */
2315 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2317 vtable_reg = alloc_preg (cfg);
2318 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2319 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2321 #ifdef MONO_ARCH_HAVE_IMT
/* IMT dispatch: slot lives at a negative offset before the vtable. */
2323 guint32 imt_slot = mono_method_get_imt_slot (method);
2324 emit_imt_argument (cfg, call, imt_arg);
2325 slot_reg = vtable_reg;
2326 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2329 if (slot_reg == -1) {
/* No IMT: resolve the interface offset table entry at runtime. */
2330 slot_reg = alloc_preg (cfg);
2331 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2332 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: fixed index into the vtable proper. */
2335 slot_reg = vtable_reg;
2336 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2337 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
/* imt_arg on a non-interface call implies generic virtual dispatch. */
2339 g_assert (mono_method_signature (method)->generic_param_count);
2340 emit_imt_argument (cfg, call, imt_arg);
2344 call->inst.sreg1 = slot_reg;
2345 call->virtual = TRUE;
2348 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2350 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but also passes @vtable_arg (the
 * runtime generic context / vtable) in the dedicated RGCTX register.
 * The rgctx move is emitted before the call so the register is live
 * across the call setup.  RGCTX_REG-less architectures: lines elided
 * here presumably assert unreachable.
 */
2354 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2355 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2362 #ifdef MONO_ARCH_RGCTX_REG
2363 rgctx_reg = mono_alloc_preg (cfg);
2364 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2369 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2371 call = (MonoCallInst*)ins;
2373 #ifdef MONO_ARCH_RGCTX_REG
2374 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2375 cfg->uses_rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2384 static inline MonoInst*
2385 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args)
2387 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emits a direct call to native code at @func with signature @sig.
 * The function pointer assignment is elided in this sampled listing;
 * presumably call->fptr = func before the add.
 */
2391 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2398 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2401 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2403 return (MonoInst*)call;
/*
 * Emits a call to a registered JIT icall identified by its address @func:
 * resolves the icall's info (signature) and calls through its wrapper.
 */
2406 inline static MonoInst*
2407 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2409 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2413 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2417 * mono_emit_abs_call:
2419 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2421 inline static MonoInst*
2422 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2423 MonoMethodSignature *sig, MonoInst **args)
2425 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2429 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* ... recognize it via the abs_patches table and patch in the real
 * address (sampled listing: rest of comment elided). */
2432 if (cfg->abs_patches == NULL)
2433 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2434 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The patch-info pointer stands in for the function pointer. */
2435 ins = mono_emit_native_call (cfg, ji, sig, args);
2436 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Returns the cached managed String.memcpy(3-arg) helper, looking it up
 * in corlib on first use; aborts if corlib is too old to provide it.
 * NOTE(review): the static cache is written without synchronization —
 * presumably safe because lookup is idempotent; confirm if relevant.
 */
2441 get_memcpy_method (void)
2443 static MonoMethod *memcpy_method = NULL;
2444 if (!memcpy_method) {
2445 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2447 g_error ("Old corlib found. Install a new one");
2449 return memcpy_method;
2453 * Emit code to copy a valuetype of type @klass whose address is stored in
2454 * @src->dreg to memory whose address is stored at @dest->dreg.
/* @native selects the unmanaged (marshalled) size of the valuetype. */
2457 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2459 MonoInst *iargs [3];
2462 MonoMethod *memcpy_method;
2466 * This check breaks with spilled vars... need to handle it during verification anyway.
2467 * g_assert (klass && klass == src->klass && klass == dest->klass);
2471 n = mono_class_native_size (klass, &align);
2473 n = mono_class_value_size (klass, &align);
/* Small copies are inlined; larger ones call the managed memcpy helper. */
2475 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2476 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2477 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* iargs[0]/[1] setup (dest, src) elided in this sampled listing. */
2481 EMIT_NEW_ICONST (cfg, iargs [2], n);
2483 memcpy_method = get_memcpy_method ();
2484 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the corlib-internal String.memset(3 args) helper, cached in a
 *   function-local static; aborts if the installed corlib is too old.
 */
2489 get_memset_method (void)
2491 	static MonoMethod *memset_method = NULL;
2492 	if (!memset_method) {
2493 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2495 			g_error ("Old corlib found. Install a new one");
2497 	return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type @klass at the address
 *   in @dest->dreg (CIL initobj).  Small sizes (<= 5 pointer words) are
 *   inlined as a memset; larger ones call the managed memset helper with
 *   (dest, 0, size).
 */
2501 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2503 	MonoInst *iargs [3];
2506 	MonoMethod *memset_method;
2508 	/* FIXME: Optimize this for the case when dest is an LDADDR */
2510 	mono_class_init (klass);
2511 	n = mono_class_value_size (klass, &align);
2513 	if (n <= sizeof (gpointer) * 5) {
2514 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2517 		memset_method = get_memset_method ();
2519 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
2520 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2521 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for generic-shared code.
 *   Three cases, depending on where the context lives:
 *   - method context used (MONO_GENERIC_CONTEXT_USED_METHOD): load the
 *     method runtime generic context (MRGCTX) from the vtable variable;
 *   - static method or valuetype class: load the class vtable from the
 *     vtable variable (and, for inflated generic methods, dereference the
 *     MRGCTX to reach its class_vtable field);
 *   - otherwise: load the vtable out of the 'this' argument's MonoObject.
 *   Only valid when compiling with a generic sharing context.
 */
2526 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2528 	MonoInst *this = NULL;
2530 	g_assert (cfg->generic_sharing_context);
2532 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2533 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2534 			!method->klass->valuetype)
2535 		EMIT_NEW_ARGLOAD (cfg, this, 0);
2537 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2538 		MonoInst *mrgctx_loc, *mrgctx_var;
2541 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2543 		mrgctx_loc = mono_get_vtable_var (cfg);
2544 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2547 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2548 		MonoInst *vtable_loc, *vtable_var;
2552 		vtable_loc = mono_get_vtable_var (cfg);
2553 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2555 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The vtable var actually holds an MRGCTX here; chase its class_vtable. */
2556 			MonoInst *mrgctx_var = vtable_var;
2559 			vtable_reg = alloc_preg (cfg);
2560 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2561 			vtable_var->type = STACK_PTR;
2567 		int vtable_reg, res_reg;
2569 		vtable_reg = alloc_preg (cfg);
2570 		res_reg = alloc_preg (cfg);
2571 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from the mempool MP) and fill a MonoJumpInfoRgctxEntry
 *   describing an RGCTX slot to be fetched lazily: the requesting METHOD,
 *   whether the lookup goes through the MRGCTX (IN_MRGCTX), the embedded
 *   patch (PATCH_TYPE/PATCH_DATA identifying the class/method/field), and
 *   the kind of info wanted (INFO_TYPE, a MONO_RGCTX_INFO_* value).
 */
2576 static MonoJumpInfoRgctxEntry *
2577 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2579 	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2580 	res->method = method;
2581 	res->in_mrgctx = in_mrgctx;
2582 	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2583 	res->data->type = patch_type;
2584 	res->data->data.target = patch_data;
2585 	res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy RGCTX fetch trampoline, passing the rgctx
 *   instruction as the single argument; ENTRY describes which slot to fetch.
 */
2590 static inline MonoInst*
2591 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2593 	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit code that fetches, from the runtime generic context, the
 *   RGCTX_TYPE info (e.g. vtable, klass) for KLASS.
 */
2597 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2598 					  MonoClass *klass, int rgctx_type)
2600 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2601 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2603 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *   Like emit_get_rgctx_klass, but the RGCTX entry refers to CMETHOD
 *   (patch type MONO_PATCH_INFO_METHODCONST).
 */
2607 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2608 					   MonoMethod *cmethod, int rgctx_type)
2610 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2611 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2613 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Like emit_get_rgctx_klass, but the RGCTX entry refers to FIELD
 *   (patch type MONO_PATCH_INFO_FIELD).
 */
2617 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2618 					  MonoClassField *field, int rgctx_type)
2620 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2621 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2623 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 *   ArrayTypeMismatchException otherwise (used for array covariance
 *   checks).  Strategy depends on the compilation mode:
 *   - MONO_OPT_SHARED: compare the object's klass against ARRAY_CLASS
 *     (class constant under AOT, immediate otherwise);
 *   - generic sharing (context_used): compare the object's vtable against
 *     the vtable fetched from the RGCTX;
 *   - normal: compare the object's vtable against the precomputed vtable
 *     (vtable constant under AOT, immediate otherwise).
 */
2627 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2629 	int vtable_reg = alloc_preg (cfg);
2630 	int context_used = 0;
2632 	if (cfg->generic_sharing_context)
2633 		context_used = mono_class_check_context_used (array_class);
2635 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2637 	if (cfg->opt & MONO_OPT_SHARED) {
2638 		int class_reg = alloc_preg (cfg);
2639 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2640 		if (cfg->compile_aot) {
2641 			int klass_reg = alloc_preg (cfg);
2642 			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2643 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2645 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2647 	} else if (context_used) {
2648 		MonoInst *vtable_ins;
2650 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2651 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2653 		if (cfg->compile_aot) {
2654 			int vt_reg = alloc_preg (cfg);
2655 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2656 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2658 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2662 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *   When running with --debug=casts, emit code that records the source
 *   class (read from the object's vtable) and the destination KLASS into
 *   the JIT TLS (class_cast_from / class_cast_to) so a subsequent failed
 *   cast can produce a detailed error message.  No-op otherwise.
 */
2666 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2668 	if (mini_get_debug_options ()->better_cast_details) {
2669 		int to_klass_reg = alloc_preg (cfg);
2670 		int vtable_reg = alloc_preg (cfg);
2671 		int klass_reg = alloc_preg (cfg);
2672 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2675 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2679 		MONO_ADD_INS (cfg->cbb, tls_get);
2680 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2681 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2683 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2684 		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2685 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Companion to save_cast_details: when --debug=casts is enabled, emit a
 *   store clearing the recorded cast details once the cast has succeeded.
 */
2690 reset_cast_details (MonoCompile *cfg)
2692 	/* Reset the variables holding the cast details */
2693 	if (mini_get_debug_options ()->better_cast_details) {
2694 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2696 		MONO_ADD_INS (cfg->cbb, tls_get);
2697 		/* It is enough to reset the from field */
2698 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2703  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2704  * generic code is generated.
/* Emits a call to the Nullable<T>.Unbox(obj) helper; under generic sharing
 * the method's code address is fetched from the RGCTX and the call is made
 * indirectly with the rgctx as an extra hidden argument. */
2707 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2709 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2712 		MonoInst *rgctx, *addr;
2714 		/* FIXME: What if the class is shared? We might not
2715 		   have to get the address of the method from the
2717 		addr = emit_get_rgctx_method (cfg, context_used, method,
2718 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2720 		rgctx = emit_get_rgctx (cfg, method, context_used);
2722 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2724 		return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the CIL 'unbox' sequence for the object on top of the stack
 *   (sp [0]): verify at runtime that the object's vtable rank is 0 and
 *   that its element class matches KLASS's element class (throwing
 *   InvalidCastException otherwise), then compute the address of the
 *   boxed payload as obj + sizeof (MonoObject).  Under generic sharing
 *   the expected element class is fetched from the RGCTX instead of
 *   being baked in.  KLASS must be non-array (rank 0, asserted).
 */
2729 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2733 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2734 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2735 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2736 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
2738 	obj_reg = sp [0]->dreg;
2739 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2740 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2742 	/* FIXME: generics */
2743 	g_assert (klass->rank == 0);
2746 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2747 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2749 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2750 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2753 		MonoInst *element_class;
2755 		/* This assertion is from the unboxcast insn */
2756 		g_assert (klass->rank == 0);
2758 		element_class = emit_get_rgctx_klass (cfg, context_used,
2759 				klass->element_class, MONO_RGCTX_INFO_KLASS);
2761 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2762 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2764 		save_cast_details (cfg, klass->element_class, obj_reg);
2765 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2766 		reset_cast_details (cfg);
/* Address of the unboxed value: skip the MonoObject header. */
2769 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2770 	MONO_ADD_INS (cfg->cbb, add);
2771 	add->type = STACK_MP;
/*
 * handle_alloc:
 *   Emit code that allocates a new object of KLASS (FOR_BOX marks box
 *   allocations, which may pick a specialized managed allocator).
 *   Allocation strategies, in order:
 *   - MONO_OPT_SHARED: call mono_object_new (domain, klass);
 *   - AOT + out-of-line bblock + corlib class: call a helper specialized
 *     to mscorlib keyed by type token, avoiding relocations;
 *   - GC-provided managed allocator, called with the vtable;
 *   - otherwise the allocation function returned by
 *     mono_class_get_allocation_ftn (), passing (size-in-words, vtable)
 *     when pass_lw is set, or just the vtable.
 */
2778 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2780 	MonoInst *iargs [2];
2783 	if (cfg->opt & MONO_OPT_SHARED) {
2784 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2785 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2787 		alloc_ftn = mono_object_new;
2788 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2789 		/* This happens often in argument checking code, eg. throw new FooException... */
2790 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2791 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2792 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2794 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2795 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2798 		if (managed_alloc) {
2799 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2800 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2802 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: first argument is the instance size in pointer words. */
2804 			guint32 lw = vtable->klass->instance_size;
2805 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2806 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
2807 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2810 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2814 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *   Variant of handle_alloc for generic-shared code, where the vtable (or
 *   the class, under MONO_OPT_SHARED) is not a compile-time constant but
 *   is produced at runtime by DATA_INST.  Falls back to
 *   mono_object_new_specific (vtable) since the class is open and a
 *   managed allocator cannot be looked up at compile time.
 */
2818 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2821 	MonoInst *iargs [2];
2822 	MonoMethod *managed_alloc = NULL;
2826 	  FIXME: we cannot get managed_alloc here because we can't get
2827 	  the class's vtable (because it's not a closed class)
2829 	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2830 	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2833 	if (cfg->opt & MONO_OPT_SHARED) {
2834 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2835 		iargs [1] = data_inst;
2836 		alloc_ftn = mono_object_new;
2838 		if (managed_alloc) {
2839 			iargs [0] = data_inst;
2840 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2843 		iargs [0] = data_inst;
2844 		alloc_ftn = mono_object_new_specific;
2847 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *   Emit code that boxes VAL into a new object of KLASS.  Nullable<T>
 *   uses the Nullable Box(1 arg) helper; everything else allocates the
 *   object and stores the value right after the MonoObject header.
 */
2851 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2853 	MonoInst *alloc, *ins;
2855 	if (mono_class_is_nullable (klass)) {
2856 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2857 		return mono_emit_method_call (cfg, method, &val, NULL);
2860 	alloc = handle_alloc (cfg, klass, TRUE);
2862 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *   Generic-sharing variant of handle_box: Nullable<T> boxing goes through
 *   an indirect call whose code address is fetched from the RGCTX; other
 *   types allocate via handle_alloc_from_inst (vtable supplied at runtime
 *   by DATA_INST) and then store the value after the object header.
 */
2868 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2870 	MonoInst *alloc, *ins;
2872 	if (mono_class_is_nullable (klass)) {
2873 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2874 		/* FIXME: What if the class is shared? We might not
2875 		   have to get the method address from the RGCTX. */
2876 		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2877 												MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2878 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2880 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2882 	alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2884 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *   Emit the CIL 'castclass' check for SRC against KLASS.  NULL passes
 *   straight through (branch to is_null_bb).  Interfaces use
 *   mini_emit_iface_cast; sealed, non-array classes in non-AOT,
 *   non-shared mode use a fast pointer comparison of the class pointer;
 *   everything else falls back to the full mini_emit_castclass walk.
 *   A failed check raises InvalidCastException; cast details are
 *   recorded/reset around the check for --debug=casts.
 */
2891 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2893 	MonoBasicBlock *is_null_bb;
2894 	int obj_reg = src->dreg;
2895 	int vtable_reg = alloc_preg (cfg);
2897 	NEW_BBLOCK (cfg, is_null_bb);
2899 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2900 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2902 	save_cast_details (cfg, klass, obj_reg);
2904 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2905 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2906 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2908 		int klass_reg = alloc_preg (cfg);
2910 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2912 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2913 			/* the remoting code is broken, access the class for now */
2915 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2916 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2918 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2919 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2921 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2923 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2924 			mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2928 	MONO_START_BB (cfg, is_null_bb);
2930 	reset_cast_details (cfg);
/*
 * handle_isinst:
 *   Emit the CIL 'isinst' test of SRC against KLASS.  The result register
 *   is pre-set to the object (so the is-null / success paths need no
 *   extra copy) and overwritten with 0 on the false path.  Dispatch:
 *   - interface KLASS: mini_emit_iface_cast;
 *   - array KLASS: rank check, then cast_class comparison with special
 *     cases for object / Enum-parent / Enum element types, interface
 *     element types, and an extra bounds==NULL check for SZARRAYs;
 *   - Nullable KLASS: isinst against the cast_class;
 *   - sealed non-shared non-AOT KLASS: fast class-pointer comparison;
 *   - otherwise: the generic mini_emit_isninst_cast walk.
 */
2936 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2939 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2940 	int obj_reg = src->dreg;
2941 	int vtable_reg = alloc_preg (cfg);
2942 	int res_reg = alloc_preg (cfg);
2944 	NEW_BBLOCK (cfg, is_null_bb);
2945 	NEW_BBLOCK (cfg, false_bb);
2946 	NEW_BBLOCK (cfg, end_bb);
2948 	/* Do the assignment at the beginning, so the other assignment can be if converted */
2949 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2950 	ins->type = STACK_OBJ;
2953 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2954 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2956 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2957 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2958 		/* the is_null_bb target simply copies the input register to the output */
2959 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2961 		int klass_reg = alloc_preg (cfg);
2963 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2966 			int rank_reg = alloc_preg (cfg);
2967 			int eclass_reg = alloc_preg (cfg);
2969 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2970 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2971 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2972 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2973 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2974 			if (klass->cast_class == mono_defaults.object_class) {
2975 				int parent_reg = alloc_preg (cfg);
2976 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2977 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2978 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2979 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2980 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
2981 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2982 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2983 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2984 			} else if (klass->cast_class == mono_defaults.enum_class) {
2985 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2986 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2987 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2988 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2990 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2991 					/* Check that the object is a vector too */
2992 					int bounds_reg = alloc_preg (cfg);
2993 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2994 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2995 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2998 				/* the is_null_bb target simply copies the input register to the output */
2999 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3001 		} else if (mono_class_is_nullable (klass)) {
3002 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3003 			/* the is_null_bb target simply copies the input register to the output */
3004 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3006 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3007 				/* the remoting code is broken, access the class for now */
3009 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3010 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3012 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3013 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3015 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3016 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3018 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3019 				/* the is_null_bb target simply copies the input register to the output */
3020 				mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3025 	MONO_START_BB (cfg, false_bb);
3027 	MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
3028 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3030 	MONO_START_BB (cfg, is_null_bb);
3032 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit the remoting-aware isinst variant (OP_CISINST).  In addition to
 *   the plain type test, transparent proxies are recognized: a proxy whose
 *   remote class can answer the question is tested against its
 *   proxy_class, while a proxy with custom type info whose type cannot be
 *   determined yields 2.  The integer result is materialized via a
 *   register and a final OP_ICONST of STACK_I4 type.
 */
3038 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3040 	/* This opcode takes as input an object reference and a class, and returns:
3041 	0) if the object is an instance of the class,
3042 	1) if the object is not instance of the class,
3043 	2) if the object is a proxy whose type cannot be determined */
3046 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3047 	int obj_reg = src->dreg;
3048 	int dreg = alloc_ireg (cfg);
3050 	int klass_reg = alloc_preg (cfg);
3052 	NEW_BBLOCK (cfg, true_bb);
3053 	NEW_BBLOCK (cfg, false_bb);
3054 	NEW_BBLOCK (cfg, false2_bb);
3055 	NEW_BBLOCK (cfg, end_bb);
3056 	NEW_BBLOCK (cfg, no_proxy_bb);
3058 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3059 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3061 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3062 		NEW_BBLOCK (cfg, interface_fail_bb);
3064 		tmp_reg = alloc_preg (cfg);
3065 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3066 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3067 		MONO_START_BB (cfg, interface_fail_bb);
3068 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface test failed: only a transparent proxy with custom type info
 * can still yield the "cannot determine" result (2). */
3070 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3072 		tmp_reg = alloc_preg (cfg);
3073 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3074 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3075 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3077 		tmp_reg = alloc_preg (cfg);
3078 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3079 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3081 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3082 		tmp_reg = alloc_preg (cfg);
3083 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3084 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3086 		tmp_reg = alloc_preg (cfg);
3087 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3088 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3089 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3091 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3092 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3094 		MONO_START_BB (cfg, no_proxy_bb);
3096 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3099 	MONO_START_BB (cfg, false_bb);
3101 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3102 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3104 	MONO_START_BB (cfg, false2_bb);
3106 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3107 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3109 	MONO_START_BB (cfg, true_bb);
3111 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3113 	MONO_START_BB (cfg, end_bb);
3116 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3118 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit the remoting-aware castclass variant (OP_CCASTCLASS).  Like
 *   handle_castclass but transparent proxies with custom type info whose
 *   type cannot be determined at JIT-emitted-check time return 1 instead
 *   of throwing; a definite mismatch throws InvalidCastException; success
 *   (including a NULL object) returns 0.
 */
3124 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3126 	/* This opcode takes as input an object reference and a class, and returns:
3127 	0) if the object is an instance of the class,
3128 	1) if the object is a proxy whose type cannot be determined
3129 	an InvalidCastException exception is thrown otherwhise*/
3132 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3133 	int obj_reg = src->dreg;
3134 	int dreg = alloc_ireg (cfg);
3135 	int tmp_reg = alloc_preg (cfg);
3136 	int klass_reg = alloc_preg (cfg);
3138 	NEW_BBLOCK (cfg, end_bb);
3139 	NEW_BBLOCK (cfg, ok_result_bb);
3141 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3142 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3144 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3145 		NEW_BBLOCK (cfg, interface_fail_bb);
3147 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3148 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3149 		MONO_START_BB (cfg, interface_fail_bb);
3150 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy failing an interface cast throws; a proxy without custom
 * type info throws as well, otherwise result is 1 (undetermined). */
3152 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3154 		tmp_reg = alloc_preg (cfg);
3155 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3156 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3157 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3159 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3160 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3163 		NEW_BBLOCK (cfg, no_proxy_bb);
3165 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3166 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3167 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3169 		tmp_reg = alloc_preg (cfg);
3170 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3171 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3173 		tmp_reg = alloc_preg (cfg);
3174 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3175 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3176 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3178 		NEW_BBLOCK (cfg, fail_1_bb);
3180 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3182 		MONO_START_BB (cfg, fail_1_bb);
3184 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3185 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3187 		MONO_START_BB (cfg, no_proxy_bb);
3189 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3192 	MONO_START_BB (cfg, ok_result_bb);
3194 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3196 	MONO_START_BB (cfg, end_bb);
3199 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3201 	ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *   Inline the body of mono_delegate_ctor: allocate the delegate object of
 *   KLASS, store its target (skipped when TARGET is a constant NULL) and
 *   method fields, optionally install a per-domain code slot (so
 *   mono_delegate_trampoline need not look up the compiled code of
 *   METHOD; skipped under AOT and for dynamic methods), and store the
 *   delegate invoke trampoline into invoke_impl.  Runtime argument checks
 *   are left to the delegate trampoline.
 */
3206 static G_GNUC_UNUSED MonoInst*
3207 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3209 	gpointer *trampoline;
3210 	MonoInst *obj, *method_ins, *tramp_ins;
3214 	obj = handle_alloc (cfg, klass, FALSE);
3216 	/* Inline the contents of mono_delegate_ctor */
3218 	/* Set target field */
3219 	/* Optimize away setting of NULL target */
3220 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3221 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3223 	/* Set method field */
3224 	EMIT_NEW_METHODCONST (cfg, method_ins, method);
3225 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3228 	 * To avoid looking up the compiled code belonging to the target method
3229 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3230 	 * store it, and we fill it after the method has been compiled.
3232 	if (!cfg->compile_aot && !method->dynamic) {
3233 		MonoInst *code_slot_ins;
3235 		domain = mono_domain_get ();
3236 		mono_domain_lock (domain);
3237 		if (!domain_jit_info (domain)->method_code_hash)
3238 			domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3239 		code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3241 			code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3242 			g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3244 		mono_domain_unlock (domain);
3246 		EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3247 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3250 	/* Set invoke_impl field */
3251 	trampoline = mono_create_delegate_trampoline (klass);
3252 	EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3253 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3255 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit the allocation of a multi-dimensional array of the given RANK via
 *   the vararg array-new icall; SP holds the dimension arguments.  Marks
 *   the method as having varargs so the backend sets up the call properly.
 */
3261 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3263 	MonoJitICallInfo *info;
3265 	/* Need to register the icall so it gets an icall wrapper */
3266 	info = mono_get_array_new_va_icall (rank);
3268 	cfg->flags |= MONO_CFG_HAS_VARARGS;
3270 	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3271 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   If the compile unit has a GOT variable that has not been materialized
 *   yet, prepend an OP_LOAD_GOTADDR to the entry basic block (so the GOT
 *   address is available before any other code runs) and add a dummy use
 *   in the exit block to keep the variable alive for the whole method,
 *   since the real uses may only appear during back-end lowering.
 */
3275 mono_emit_load_got_addr (MonoCompile *cfg)
3277 	MonoInst *getaddr, *dummy_use;
3279 	if (!cfg->got_var || cfg->got_var_allocated)
3282 	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3283 	getaddr->dreg = cfg->got_var->dreg;
3285 	/* Add it to the start of the first bblock */
3286 	if (cfg->bb_entry->code) {
3287 		getaddr->next = cfg->bb_entry->code;
3288 		cfg->bb_entry->code = getaddr;
3291 		MONO_ADD_INS (cfg->bb_entry, getaddr);
3293 	cfg->got_var_allocated = TRUE;
3296 	 * Add a dummy use to keep the got_var alive, since real uses might
3297 	 * only be generated by the back ends.
3298 	 * Add it to end_bblock, so the variable's lifetime covers the whole
3300 	 * It would be better to make the usage of the got var explicit in all
3301 	 * cases when the backend needs it (i.e. calls, throw etc.), so this
3302 	 * wouldn't be needed.
3304 	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3305 	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Rejections include: generic sharing in effect; runtime/icall/noinline/
 *   synchronized/pinvoke methods; MarshalByRef classes; methods without a
 *   header or with exception clauses; bodies larger than the inline limit
 *   (MONO_INLINELIMIT env override, else INLINE_LENGTH_LIMIT); classes
 *   whose cctor would have to be triggered from inlined code and cannot be
 *   run eagerly; methods with declarative security (CAS); and, with
 *   MONO_ARCH_SOFT_FLOAT, any R4 in the signature.  On the LMF-ops
 *   fast path some internal-call/pinvoke methods are allowed early.
 */
3309 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3311 	MonoMethodHeader *header = mono_method_get_header (method);
3313 #ifdef MONO_ARCH_SOFT_FLOAT
3314 	MonoMethodSignature *sig = mono_method_signature (method);
3318 	if (cfg->generic_sharing_context)
3321 #ifdef MONO_ARCH_HAVE_LMF_OPS
3322 	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3323 		 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3324 	    !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3328 	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3329 	    (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3330 	    (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3331 	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3332 	    (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3333 	    (method->klass->marshalbyref) ||
3334 	    !header || header->num_clauses)
3337 	/* also consider num_locals? */
3338 	/* Do the size check early to avoid creating vtables */
3339 	if (getenv ("MONO_INLINELIMIT")) {
3340 		if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3343 	} else if (header->code_size >= INLINE_LENGTH_LIMIT)
3347 	 * if we can initialize the class of the method right away, we do,
3348 	 * otherwise we don't allow inlining if the class needs initialization,
3349 	 * since it would mean inserting a call to mono_runtime_class_init()
3350 	 * inside the inlined code
3352 	if (!(cfg->opt & MONO_OPT_SHARED)) {
3353 		if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3354 			if (cfg->run_cctors && method->klass->has_cctor) {
3355 				if (!method->klass->runtime_info)
3356 					/* No vtable created yet */
3358 				vtable = mono_class_vtable (cfg->domain, method->klass);
3361 				/* This makes so that inline cannot trigger */
3362 				/* .cctors: too many apps depend on them */
3363 				/* running with a specific order... */
3364 				if (! vtable->initialized)
3366 				mono_runtime_class_init (vtable);
3368 		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3369 			if (!method->klass->runtime_info)
3370 				/* No vtable created yet */
3372 			vtable = mono_class_vtable (cfg->domain, method->klass);
3375 			if (!vtable->initialized)
3380 		 * If we're compiling for shared code
3381 		 * the cctor will need to be run at aot method load time, for example,
3382 		 * or at the end of the compilation of the inlining method.
3384 		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3389 	 * CAS - do not inline methods with declarative security
3390 	 * Note: this has to be before any possible return TRUE;
3392 	if (mono_method_has_declsec (method))
3395 #ifdef MONO_ARCH_SOFT_FLOAT
3397 	if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3399 	for (i = 0; i < sig->param_count; ++i)
3400 		if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access in METHOD requires emitting code to
 * run the class constructor of VTABLE's class first. The early-out conditions
 * below filter the cases where the cctor is guaranteed to have run (or is not
 * needed) before the field access can happen.
 */
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
	/* Already initialized at compile time; not valid under AOT, where the
	 * JIT-time vtable state says nothing about the eventual run time. */
	if (vtable->initialized && !cfg->compile_aot)
	/* beforefieldinit classes do not require eager cctor invocation on
	 * field access (ECMA-335 relaxed semantics). */
	if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
	if (!mono_class_needs_cctor_run (vtable->klass, method))
	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, including the bounds check against
 * MonoArray.max_length. Returns the instruction producing the address
 * (STACK_PTR).
 */
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_VOID_P == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	index2_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	index2_reg = index_reg;

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(__i386__) || defined(__x86_64__)
	/* Fast path: power-of-two element sizes can use a single x86 LEA with a
	 * scaled index (fast_log2 maps size -> shift amount; -1 entries unused). */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;
	/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS. Each index is rebased
 * against the per-dimension lower bound and bounds-checked against the
 * per-dimension length from the MonoArrayBounds table, then the flattened
 * offset is computed as ((idx1 * len2) + idx2) * size. Returns the address
 * instruction (STACK_MP). Only compiled when the arch supports native mul.
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* realidx1 = index1 - lower_bound[0]; throw if >= length[0] (unsigned
	 * compare also catches index1 < lower_bound, which wraps negative). */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Same check for the second dimension (bounds [1], hence the
	 * sizeof (MonoArrayBounds) displacement). */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + ((realidx1 * length[1]) + realidx2) * size + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;

	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for a multi-dimensional array element access
 * on CMETHOD (an Address/Get/Set array accessor). Rank 1 and rank 2 (with
 * MONO_OPT_INTRINS) get inline fast paths; everything else goes through the
 * generic marshalled Address () helper. For a setter, the last parameter is
 * the value, hence the rank adjustment.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
	MonoMethod *addr_method;

	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

		return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);

#ifndef MONO_ARCH_EMULATE_MUL_DIV
	/* emit_ldelema_2 depends on OP_LMUL */
	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
		return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);

	/* Slow path: call the generated Address () wrapper for this rank/size. */
	element_size = mono_class_array_element_size (cmethod->klass->element_class);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Emit inline IR for well-known "intrinsic" methods instead of a real call:
 * String (get_Chars/get_Length/InternalSetChar), Object (GetType/
 * InternalGetHashCode/.ctor), Array (get_Rank/get_Length/GetGenericValueImpl),
 * RuntimeHelpers.get_OffsetToStringData, Thread (get_CurrentThread/
 * SpinWait_nop/MemoryBarrier), System.Threading.Interlocked ops, Debugger.Break
 * and Environment.get_IsRunningOnWindows. Returns the emitted instruction, or
 * falls through to mono_arch_emit_inst_for_method () when no generic intrinsic
 * applies.
 */
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* Lazily cached; resolving the class once is enough for the process. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			/* Inline indexed load from MonoString.chars (2 bytes/char). */
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_VOID_P == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
			index_reg = args [1]->dreg;
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(__i386__) || defined(__x86_64__)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
			/* addr = str + (index << 1) + offsetof (MonoString, chars) */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
			type_from_op (ins, NULL, NULL);

		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			/* obj->vtable->type, no call needed */
			int dreg = alloc_preg (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

#if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
			/* Hash the (non-moving) object address: (addr >> 3) * 2654435761
			 * (Knuth's multiplicative constant). Invalid with a moving GC. */
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor is empty: replace the call with a NOP. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);

	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Both handled getters start with 'g'; cheap early reject. */
		if (cmethod->name [0] != 'g')

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
										   args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
								   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
			ins->dreg = alloc_preg (cfg);
			ins->type = STACK_OBJ;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
	} else if (mini_class_is_system_array (cmethod->klass) &&
			strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
		/* Load element [args[1]] and store it through the ref arg args[2]. */
		MonoInst *addr, *store, *load;
		MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);

		addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {

#if SIZEOF_VOID_P == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			/* Increment (ref x) == atomic add of +1 */
			MonoInst *ins_iconst;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_VOID_P == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			/* Decrement (ref x) == atomic add of -1 */
			MonoInst *ins_iconst;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_VOID_P == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Add") == 0) {

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_VOID_P == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				/* NOTE(review): opcode is set to OP_ATOMIC_ADD_NEW_I4/I8 above,
				 * but the comparison here uses OP_ATOMIC_ADD_I4 — it is always
				 * false, so the I4 case gets STACK_I8. The Increment/Decrement
				 * branches compare against OP_ATOMIC_ADD_NEW_I4; this likely
				 * should too. Verify against upstream. */
				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_VOID_P == 8
			else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
					 (fsig->params [0]->type == MONO_TYPE_I) ||
					 (fsig->params [0]->type == MONO_TYPE_OBJECT))
				opcode = OP_ATOMIC_EXCHANGE_I8;
			/* On 32 bit, native int and object refs fit the I4 exchange. */
			else if ((fsig->params [0]->type == MONO_TYPE_I) ||
					 (fsig->params [0]->type == MONO_TYPE_OBJECT))
				opcode = OP_ATOMIC_EXCHANGE_I4;
			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
				ins->type = STACK_I4;
				ins->type = STACK_I8;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				g_assert_not_reached ();
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
		/*
		 * Can't implement CompareExchange methods this way since they have
		 * three arguments. We can implement one of the common cases, where the new
		 * value is a constant.
		 */
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			/* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */

	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			MONO_INST_NEW (cfg, ins, OP_BREAK);
			MONO_ADD_INS (cfg->cbb, ins);
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
			/* Platform is known at JIT time: fold to a constant. */
#ifdef PLATFORM_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
			EMIT_NEW_ICONST (cfg, ins, 0);
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * http://everything2.com/?node_id=1051618
		 */
#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
	/* No generic intrinsic matched: give the backend a chance. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a more efficient replacement, currently only
 * String.InternalAllocateStr -> the GC's managed string allocator (when one is
 * available). Returns the emitted call, or NULL when no redirection applies.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,  
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
			/* Call the allocator as managed_alloc (vtable, length). */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   During inlining, create a local variable for every argument of the inlined
 * callee (including the implicit 'this') and store the corresponding caller
 * stack value SP [i] into it, so the inlined body can reference cfg->args [].
 */
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
	MonoInst *store, *temp;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* For 'this' the declared type is not in sig->params: derive it
		 * from the stack entry instead. */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceeding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
3936 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3937 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3939 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of CALLED_METHOD if its full name
 * starts with the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT. With the
 * variable unset ("" is cached), everything is allowed.
 */
check_inline_called_method_name_limit (MonoMethod *called_method)
	/* Cached on first call; empty string means "no limit configured". */
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;
			limit = (char *) "";

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
3969 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit (): only
 * allow inlining into CALLER_METHOD if its full name starts with the prefix
 * in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
check_inline_caller_method_name_limit (MonoMethod *caller_method)
	/* Cached on first call; empty string means "no limit configured". */
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
			limit = (char *) "";

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current method at IP. Saves the parts of CFG that
 * method_to_ir () mutates, compiles the callee's IL into freshly created
 * start/end bblocks via mono_method_to_ir2 (), and, if the reported cost is
 * acceptable (< 60) or INLINE_ALLWAYS is set, links/merges the new bblocks
 * into the caller's graph. On abort, restores CFG and discards the new blocks.
 * Returns the cost on success (used by the caller's inline budget).
 */
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
		guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar =  mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);
	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save every cfg field that mono_method_to_ir2 () repoints at the callee. */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;

	costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);

	/* Restore the caller's compilation state. */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;

	/* 60: empirical cost threshold for accepting the inline; negative costs
	 * signal that the callee could not be converted. */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);

			/*
			 * If the inlined method contains only a throw, then the ret var is not 
			 * set, so set it to a dummy value.
			 */
			if (!cfg->ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					g_assert_not_reached ();

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
4168 * Some of these comments may well be out-of-date.
4169 * Design decisions: we do a single pass over the IL code (and we do bblock
4170 * splitting/merging in the few cases when it's required: a back jump to an IL
4171 * address that was not already seen as bblock starting point).
4172 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4173 * Complex operations are decomposed in simpler ones right away. We need to let the
4174 * arch-specific code peek and poke inside this process somehow (except when the
4175 * optimizations can take advantage of the full semantic info of coarse opcodes).
4176 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4177 * MonoInst->opcode initially is the IL opcode or some simplification of that
4178 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4179 * opcode with value bigger than OP_LAST.
4180 * At this point the IR can be handed over to an interpreter, a dumb code generator
4181 * or to the optimizing code generator that will translate it to SSA form.
4183 * Profiling directed optimizations.
4184 * We may compile by default with few or no optimizations and instrument the code
4185 * or the user may indicate what methods to optimize the most either in a config file
4186 * or through repeated runs where the compiler applies offline the optimizations to
4187 * each method and then decides if it was worth it.
4190 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4191 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4192 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4193 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4194 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4195 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4196 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4197 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4199 /* offset from br.s -> br like opcodes */
4200 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Returns whether the CIL offset IP belongs to basic block BB, i.e. no other
 * bblock starts at that offset (a NULL entry means no bblock boundary there).
 */
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];

	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL between START and END: decode each opcode and
 * create (via GET_BBLOCK) a basic block at every branch target and at the
 * instruction following each branch/switch, so that the main conversion pass
 * finds all block boundaries already registered. Blocks that can only be
 * reached through a throw are flagged out_of_line.
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		opcode = &mono_opcodes [i];
		/* Advance past the operand according to its encoding; only the
		 * branch/switch encodings create new bblocks. */
		switch (opcode->argument) {
		case MonoInlineNone:
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoShortInlineR:
		case MonoShortInlineVar:
		case MonoShortInlineI:
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineBrTarget:
			/* 4-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			/* Targets are relative to the end of the jump table. */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
			g_assert_not_reached ();

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
			/* A block ending in throw is cold code: move it out of line. */
			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods the
 * token is an index into the wrapper's own data; otherwise it is a metadata
 * token resolved against M's image. "allow_open": open constructed generic
 * methods are not rejected here (see mini_get_method () for the check).
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	if (m->wrapper_type != MONO_WRAPPER_NONE)
		return mono_method_get_wrapper_data (m, token);

	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (compilation
 * cannot proceed with unbound type parameters).
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);

	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 * wrapper methods, a metadata lookup against the method's image otherwise.
 * The class is initialized before being returned.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
		klass = mono_class_get_full (method->klass->image, token, context);
		mono_class_init (klass);
 * Returns TRUE if the JIT should abort inlining because "callee" 
 * is influenced by security attributes.
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* Only evaluate declarative security when inlining (cfg->method is the
	 * outermost method; the real caller's demands were checked already). */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return (and cache) the SecurityManager.MethodAccessException (2-arg)
 * helper used to raise a method-access violation at run time.
 */
method_access_exception (void)
	static MonoMethod *method = NULL;

	/* Resolved once; the SecurityManager class does not change. */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
							  "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call that raises MethodAccessException (CALLER, CALLEE) at run
 * time, in place of the disallowed call from CALLER to CALLEE.
 */
emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
				    MonoBasicBlock *bblock, unsigned char *ip)
	MonoMethod *thrower = method_access_exception ();

	EMIT_NEW_METHODCONST (cfg, args [0], caller);
	EMIT_NEW_METHODCONST (cfg, args [1], callee);
	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *
 *   Return (and cache) the SecurityManager.VerificationException (0-arg)
 * helper used to raise a verification failure at run time.
 */
verification_exception (void)
	static MonoMethod *method = NULL;

	/* Resolved once; the SecurityManager class does not change. */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
							  "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *
 *   Emit a call raising VerificationException at the current point; used when
 * unverifiable IL is encountered under a policy that demands verified code.
 */
emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
	MonoMethod *thrower = verification_exception ();

	mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: verify that CALLER's security level permits calling
 * CALLEE. A call is allowed when the caller's level is at least the callee's,
 * or when either side is SafeCritical; otherwise code throwing
 * MethodAccessException is emitted in place of the call.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
	MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
	MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
	gboolean is_safe = TRUE;

	if (!(caller_level >= callee_level ||
			caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
			callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {

		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Test hook for the CoreCLR security checks: a method literally named
 * "unsafeMethod" is treated as unsafe (presumably everything else is safe —
 * the return statements are not visible in this chunk; verify).
 */
method_is_safe (MonoMethod *method)
	if (strcmp (method->name, "unsafeMethod") == 0)
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
	/*
	 * Pattern matched (the RVA-backed static array initializer):
	 *   newarr[System.Int32]
	 *   dup
	 *   ldtoken field valuetype ...
	 *   call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* 0x4 is the ldtoken operand's token-type byte — TODO confirm against
	 * the token encoding (mono_ldtoken field tokens). */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;

		const char *data_ptr;

		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);

		/* The called method must really be RuntimeHelpers.InitializeArray. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)

		/* Only element types whose in-image byte layout matches the runtime
		 * layout can be copied directly. */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
			return NULL; /* stupid ARM FP swapped format */
		if (size > mono_type_size (field->type, &dummy_align))

		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			/* Dynamic (SRE) images keep the blob directly on the field. */
			data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on @cfg for malformed IL found at
 *   @ip in @method.  The message names the method and either says the
 *   body is empty or includes a disassembly of the offending instruction.
 */
4535 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4537 char *method_fname = mono_method_full_name (method, TRUE);
4540 if (mono_method_get_header (method)->code_size == 0)
4541 method_code = g_strdup ("method body is empty.");
4543 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4544 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4545 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* The formatted copies above own the strings now; free the temporaries. */
4546 g_free (method_fname);
4547 g_free (method_code);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on @cfg so compilation
 *   aborts with MONO_EXCEPTION_OBJECT_SUPPLIED.  exception_ptr holds a
 *   managed object, so it must be registered as a GC root before the
 *   reference is stored.
 */
4551 set_exception_object (MonoCompile *cfg, MonoException *exception)
4553 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4554 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4555 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Returns TRUE if @klass is a reference type, resolving type variables
 *   through the generic sharing context first (a shared T may stand for
 *   a reference type even though &klass->byval_arg alone cannot say so).
 */
4559 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4563 if (cfg->generic_sharing_context)
4564 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4566 type = &klass->byval_arg;
4567 return MONO_TYPE_IS_REFERENCE (type);
4571 * mono_decompose_array_access_opts:
4573 * Decompose array access opcodes.
4574 * This should be in decompose.c, but it emits calls so it has to stay here until
4575 * the old JIT is gone.
4578 mono_decompose_array_access_opts (MonoCompile *cfg)
4580 MonoBasicBlock *bb, *first_bb;
4583 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4584 * can be executed anytime. It should be run before decompose_long
4588 * Create a dummy bblock and emit code into it so we can use the normal
4589 * code generation macros.
4591 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4592 first_bb = cfg->cbb;
4594 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4596 MonoInst *prev = NULL;
4598 MonoInst *iargs [3];
/* Skip bblocks with no array-access opcodes (flag set during IR build). */
4601 if (!bb->has_array_access)
4604 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock before emitting each replacement sequence. */
4606 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4612 for (ins = bb->code; ins; ins = ins->next) {
4613 switch (ins->opcode) {
/* Array length: load MonoArray::max_length from the array pointer. */
4615 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4616 G_STRUCT_OFFSET (MonoArray, max_length));
4617 MONO_ADD_INS (cfg->cbb, dest);
4619 case OP_BOUNDS_CHECK:
/* Delegate to the arch-specific bounds-check expansion. */
4620 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: in shared (domain-neutral) code the vtable cannot be baked
 * in, so call mono_array_new (domain, class, len) instead. */
4623 if (cfg->opt & MONO_OPT_SHARED) {
4624 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4625 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4626 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4627 iargs [2]->dreg = ins->sreg1;
4629 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4630 dest->dreg = ins->dreg;
/* Non-shared: the vtable is known at compile time, use the faster
 * mono_array_new_specific (vtable, len) icall. */
4632 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4635 NEW_VTABLECONST (cfg, iargs [0], vtable);
4636 MONO_ADD_INS (cfg->cbb, iargs [0]);
4637 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4638 iargs [1]->dreg = ins->sreg1;
4640 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4641 dest->dreg = ins->dreg;
/* String length: load MonoString::length. */
4645 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4646 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4647 MONO_ADD_INS (cfg->cbb, dest);
/* The emitted sequence must not itself have created new bblocks. */
4653 g_assert (cfg->cbb == first_bb);
4655 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4656 /* Replace the original instruction with the new code sequence */
4658 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Clear the scratch bblock so it can be reused for the next ins. */
4659 first_bb->code = first_bb->last_ins = NULL;
4660 first_bb->in_count = first_bb->out_count = 0;
4661 cfg->cbb = first_bb;
4668 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
#ifdef MONO_ARCH_SOFT_FLOAT
4681 * mono_decompose_soft_float:
4683 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4684 * similar to long support on 32 bit platforms. 32 bit float values require special
4685 * handling when used as locals, arguments, and in calls.
4686 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4689 mono_decompose_soft_float (MonoCompile *cfg)
4691 MonoBasicBlock *bb, *first_bb;
4694 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4698 * Create a dummy bblock and emit code into it so we can use the normal
4699 * code generation macros.
4701 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4702 first_bb = cfg->cbb;
4704 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4706 MonoInst *prev = NULL;
4709 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* Reset the scratch bblock before each instruction's replacement. */
4711 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4717 for (ins = bb->code; ins; ins = ins->next) {
4718 const char *spec = INS_INFO (ins->opcode);
4720 /* Most fp operations are handled automatically by opcode emulation */
4722 switch (ins->opcode) {
/* R8 constant: reinterpret the double's bits as an i8 constant
 * (union of double and gint64, per the soft-float representation). */
4725 d.vald = *(double*)ins->inst_p0;
4726 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4731 /* We load the r8 value */
4732 d.vald = *(float*)ins->inst_p0;
4733 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long (vreg-pair) moves. */
4737 ins->opcode = OP_LMOVE;
/* Extract one half of the vreg pair; +1/+2 select the sub-vregs. */
4740 ins->opcode = OP_MOVE;
4741 ins->sreg1 = ins->sreg1 + 1;
4744 ins->opcode = OP_MOVE;
4745 ins->sreg1 = ins->sreg1 + 2;
/* Returning an r8: set the long return registers from the pair. */
4748 int reg = ins->sreg1;
4750 ins->opcode = OP_SETLRET;
4752 ins->sreg1 = reg + 1;
4753 ins->sreg2 = reg + 2;
/* r8 loads/stores are plain 64-bit integer loads/stores. */
4756 case OP_LOADR8_MEMBASE:
4757 ins->opcode = OP_LOADI8_MEMBASE;
4759 case OP_STORER8_MEMBASE_REG:
4760 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: needs an r8->r4 narrowing, done by the mono_fstore_r4
 * icall (address computed into a fresh preg). */
4762 case OP_STORER4_MEMBASE_REG: {
4763 MonoInst *iargs [2];
4766 /* Arg 1 is the double value */
4767 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4768 iargs [0]->dreg = ins->sreg1;
4770 /* Arg 2 is the address to store to */
4771 addr_reg = mono_alloc_preg (cfg);
4772 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4773 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: widen via the mono_fload_r4 icall. */
4777 case OP_LOADR4_MEMBASE: {
4778 MonoInst *iargs [1];
4782 addr_reg = mono_alloc_preg (cfg);
4783 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4784 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4785 conv->dreg = ins->dreg;
4790 case OP_FCALL_MEMBASE: {
4791 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4: the soft-float ABI returns the value in an
 * integer register, so retype the call and widen afterwards. */
4792 if (call->signature->ret->type == MONO_TYPE_R4) {
4793 MonoCallInst *call2;
4794 MonoInst *iargs [1];
4797 /* Convert the call into a call returning an int */
4798 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4799 memcpy (call2, call, sizeof (MonoCallInst));
4800 switch (ins->opcode) {
4802 call2->inst.opcode = OP_CALL;
4805 call2->inst.opcode = OP_CALL_REG;
4807 case OP_FCALL_MEMBASE:
4808 call2->inst.opcode = OP_CALL_MEMBASE;
4811 g_assert_not_reached ();
4813 call2->inst.dreg = mono_alloc_ireg (cfg);
4814 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4816 /* FIXME: Optimize this */
4818 /* Emit an r4->r8 conversion */
4819 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4820 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4821 conv->dreg = ins->dreg;
/* Calls returning r8: just retype to the corresponding long call. */
4823 switch (ins->opcode) {
4825 ins->opcode = OP_LCALL;
4828 ins->opcode = OP_LCALL_REG;
4830 case OP_FCALL_MEMBASE:
4831 ins->opcode = OP_LCALL_MEMBASE;
4834 g_assert_not_reached ();
/* fcompare followed by fbcc: replace with an emulation icall whose
 * integer result is compared and branched on. */
4840 MonoJitICallInfo *info;
4841 MonoInst *iargs [2];
4842 MonoInst *call, *cmp, *br;
4844 /* Convert fcompare+fbcc to icall+icompare+beq */
4846 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4849 /* Create dummy MonoInst's for the arguments */
4850 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4851 iargs [0]->dreg = ins->sreg1;
4852 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4853 iargs [1]->dreg = ins->sreg2;
4855 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4857 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4858 cmp->sreg1 = call->dreg;
4860 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on "result != 0", keeping the original branch targets. */
4862 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4863 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4864 br->inst_true_bb = ins->next->inst_true_bb;
4865 br->inst_false_bb = ins->next->inst_false_bb;
4866 MONO_ADD_INS (cfg->cbb, br);
4868 /* The call sequence might include fp ins */
4871 /* Skip fbcc or fccc */
4872 NULLIFY_INS (ins->next);
/* fccc (set-on-condition): same emulation icall, materialize the
 * boolean with icompare_imm + iceq. */
4880 MonoJitICallInfo *info;
4881 MonoInst *iargs [2];
4884 /* Convert fccc to icall+icompare+iceq */
4886 info = mono_find_jit_opcode_emulation (ins->opcode);
4889 /* Create dummy MonoInst's for the arguments */
4890 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4891 iargs [0]->dreg = ins->sreg1;
4892 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4893 iargs [1]->dreg = ins->sreg2;
4895 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4898 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4900 /* The call sequence might include fp ins */
/* Sanity: after this pass no instruction may still use fp vregs. */
4905 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4906 mono_print_ins (ins);
4907 g_assert_not_reached ();
4912 g_assert (cfg->cbb == first_bb);
4914 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4915 /* Replace the original instruction with the new code sequence */
4917 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4918 first_bb->code = first_bb->last_ins = NULL;
4919 first_bb->in_count = first_bb->out_count = 0;
4920 cfg->cbb = first_bb;
4927 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass introduced long opcodes; decompose them now. */
4930 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *   Emit the store of the stack value *sp into local variable @n.
 *   When the store is a plain register move and the source is a fresh
 *   constant, retarget the constant's dreg to the local's vreg instead
 *   of emitting a separate move.
 */
4936 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4939 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4940 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4941 /* Optimize reg-reg moves away */
4943 * Can't optimize other opcodes, since sp[0] might point to
4944 * the last ins of a decomposed opcode.
4946 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit an explicit local store. */
4948 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4953 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the
 *   local's address, emit the initialization directly into the local
 *   (NULL for reference types, vzero for value types) and return the IL
 *   pointer past the consumed sequence.  Returns NULL when the pattern
 *   does not apply.  @size selects the 1- or 2-byte ldloca encoding.
 */
4956 static inline unsigned char *
4957 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Wide form: 16-bit local index follows the opcode. */
4966 local = read16 (ip + 2);
/* Match the two-byte initobj prefix opcode in the same bblock. */
4970 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
4971 gboolean skip = FALSE;
4973 /* From the INITOBJ case */
4974 token = read32 (ip + 2);
4975 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
4976 CHECK_TYPELOAD (klass);
/* Reference types (including shared generic Ts) init to NULL;
 * structs are zeroed in place. */
4977 if (generic_class_is_reference_type (cfg, klass)) {
4978 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
4979 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
4980 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
4981 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
4982 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
4995 * mono_method_to_ir: translates IL into basic blocks containing trees
4998 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4999 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5000 guint inline_offset, gboolean is_virtual_call)
5002 MonoInst *ins, **sp, **stack_start;
5003 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5004 MonoMethod *cmethod, *method_definition;
5005 MonoInst **arg_array;
5006 MonoMethodHeader *header;
5008 guint32 token, ins_flag;
5010 MonoClass *constrained_call = NULL;
5011 unsigned char *ip, *end, *target, *err_pos;
5012 static double r8_0 = 0.0;
5013 MonoMethodSignature *sig;
5014 MonoGenericContext *generic_context = NULL;
5015 MonoGenericContainer *generic_container = NULL;
5016 MonoType **param_types;
5017 int i, n, start_new_bblock, dreg;
5018 int num_calls = 0, inline_costs = 0;
5019 int breakpoint_id = 0;
5021 MonoBoolean security, pinvoke;
5022 MonoSecurityManager* secman = NULL;
5023 MonoDeclSecurityActions actions;
5024 GSList *class_inits = NULL;
5025 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5028 /* serialization and xdomain stuff may need access to private fields and methods */
5029 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5030 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5031 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5032 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5033 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5034 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5036 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5038 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5039 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5040 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5041 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5043 image = method->klass->image;
5044 header = mono_method_get_header (method);
5045 generic_container = mono_method_get_generic_container (method);
5046 sig = mono_method_signature (method);
5047 num_args = sig->hasthis + sig->param_count;
5048 ip = (unsigned char*)header->code;
5049 cfg->cil_start = ip;
5050 end = ip + header->code_size;
5051 mono_jit_stats.cil_code_size += header->code_size;
5053 method_definition = method;
5054 while (method_definition->is_inflated) {
5055 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5056 method_definition = imethod->declaring;
5059 /* SkipVerification is not allowed if core-clr is enabled */
5060 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5062 dont_verify_stloc = TRUE;
5065 if (!dont_verify && mini_method_verify (cfg, method_definition))
5066 goto exception_exit;
5068 if (mono_debug_using_mono_debugger ())
5069 cfg->keep_cil_nops = TRUE;
5071 if (sig->is_inflated)
5072 generic_context = mono_method_get_context (method);
5073 else if (generic_container)
5074 generic_context = &generic_container->context;
5075 cfg->generic_context = generic_context;
5077 if (!cfg->generic_sharing_context)
5078 g_assert (!sig->has_type_parameters);
5080 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5081 g_assert (method->is_inflated);
5082 g_assert (mono_method_get_context (method)->method_inst);
5084 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5085 g_assert (sig->generic_param_count);
5087 if (cfg->method == method) {
5088 cfg->real_offset = 0;
5090 cfg->real_offset = inline_offset;
5093 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5094 cfg->cil_offset_to_bb_len = header->code_size;
5096 cfg->current_method = method;
5098 if (cfg->verbose_level > 2)
5099 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5101 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5103 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5104 for (n = 0; n < sig->param_count; ++n)
5105 param_types [n + sig->hasthis] = sig->params [n];
5106 cfg->arg_types = param_types;
5108 dont_inline = g_list_prepend (dont_inline, method);
5109 if (cfg->method == method) {
5111 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5112 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5115 NEW_BBLOCK (cfg, start_bblock);
5116 cfg->bb_entry = start_bblock;
5117 start_bblock->cil_code = NULL;
5118 start_bblock->cil_length = 0;
5121 NEW_BBLOCK (cfg, end_bblock);
5122 cfg->bb_exit = end_bblock;
5123 end_bblock->cil_code = NULL;
5124 end_bblock->cil_length = 0;
5125 g_assert (cfg->num_bblocks == 2);
5127 arg_array = cfg->args;
5129 if (header->num_clauses) {
5130 cfg->spvars = g_hash_table_new (NULL, NULL);
5131 cfg->exvars = g_hash_table_new (NULL, NULL);
5133 /* handle exception clauses */
5134 for (i = 0; i < header->num_clauses; ++i) {
5135 MonoBasicBlock *try_bb;
5136 MonoExceptionClause *clause = &header->clauses [i];
5137 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5138 try_bb->real_offset = clause->try_offset;
5139 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5140 tblock->real_offset = clause->handler_offset;
5141 tblock->flags |= BB_EXCEPTION_HANDLER;
5143 link_bblock (cfg, try_bb, tblock);
5145 if (*(ip + clause->handler_offset) == CEE_POP)
5146 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5148 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5149 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5150 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5151 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5152 MONO_ADD_INS (tblock, ins);
5154 /* todo: is a fault block unsafe to optimize? */
5155 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5156 tblock->flags |= BB_EXCEPTION_UNSAFE;
5160 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5162 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5164 /* catch and filter blocks get the exception object on the stack */
5165 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5166 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5167 MonoInst *dummy_use;
5169 /* mostly like handle_stack_args (), but just sets the input args */
5170 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5171 tblock->in_scount = 1;
5172 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5173 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5176 * Add a dummy use for the exvar so its liveness info will be
5180 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5182 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5183 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5184 tblock->real_offset = clause->data.filter_offset;
5185 tblock->in_scount = 1;
5186 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5187 /* The filter block shares the exvar with the handler block */
5188 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5189 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5190 MONO_ADD_INS (tblock, ins);
5194 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5195 clause->data.catch_class &&
5196 cfg->generic_sharing_context &&
5197 mono_class_check_context_used (clause->data.catch_class)) {
5198 if (mono_method_get_context (method)->method_inst)
5199 GENERIC_SHARING_FAILURE (CEE_NOP);
5202 * In shared generic code with catch
5203 * clauses containing type variables
5204 * the exception handling code has to
5205 * be able to get to the rgctx.
5206 * Therefore we have to make sure that
5207 * the vtable/mrgctx argument (for
5208 * static or generic methods) or the
5209 * "this" argument (for non-static
5210 * methods) are live.
5212 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5213 mini_method_get_context (method)->method_inst ||
5214 method->klass->valuetype) {
5215 mono_get_vtable_var (cfg);
5217 MonoInst *dummy_use;
5219 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5224 arg_array = alloca (sizeof (MonoInst *) * num_args);
5225 cfg->cbb = start_bblock;
5226 cfg->args = arg_array;
5227 mono_save_args (cfg, sig, inline_args);
5230 /* FIRST CODE BLOCK */
5231 NEW_BBLOCK (cfg, bblock);
5232 bblock->cil_code = ip;
5236 ADD_BBLOCK (cfg, bblock);
5238 if (cfg->method == method) {
5239 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5240 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5241 MONO_INST_NEW (cfg, ins, OP_BREAK);
5242 MONO_ADD_INS (bblock, ins);
5246 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5247 secman = mono_security_manager_get_methods ();
5249 security = (secman && mono_method_has_declsec (method));
5250 /* at this point having security doesn't mean we have any code to generate */
5251 if (security && (cfg->method == method)) {
5252 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5253 * And we do not want to enter the next section (with allocation) if we
5254 * have nothing to generate */
5255 security = mono_declsec_get_demands (method, &actions);
5258 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5259 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5261 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5262 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5263 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5265 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5266 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5270 mono_custom_attrs_free (custom);
5273 custom = mono_custom_attrs_from_class (wrapped->klass);
5274 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5278 mono_custom_attrs_free (custom);
5281 /* not a P/Invoke after all */
5286 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5287 /* we use a separate basic block for the initialization code */
5288 NEW_BBLOCK (cfg, init_localsbb);
5289 cfg->bb_init = init_localsbb;
5290 init_localsbb->real_offset = cfg->real_offset;
5291 start_bblock->next_bb = init_localsbb;
5292 init_localsbb->next_bb = bblock;
5293 link_bblock (cfg, start_bblock, init_localsbb);
5294 link_bblock (cfg, init_localsbb, bblock);
5296 cfg->cbb = init_localsbb;
5298 start_bblock->next_bb = bblock;
5299 link_bblock (cfg, start_bblock, bblock);
5302 /* at this point we know, if security is TRUE, that some code needs to be generated */
5303 if (security && (cfg->method == method)) {
5306 mono_jit_stats.cas_demand_generation++;
5308 if (actions.demand.blob) {
5309 /* Add code for SecurityAction.Demand */
5310 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5311 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5312 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5313 mono_emit_method_call (cfg, secman->demand, args, NULL);
5315 if (actions.noncasdemand.blob) {
5316 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5317 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5318 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5319 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5320 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5321 mono_emit_method_call (cfg, secman->demand, args, NULL);
5323 if (actions.demandchoice.blob) {
5324 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5325 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5326 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5327 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5328 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5332 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5334 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5337 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5338 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5339 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5340 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5341 if (!(method->klass && method->klass->image &&
5342 mono_security_core_clr_is_platform_image (method->klass->image))) {
5343 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5347 if (!method_is_safe (method))
5348 emit_throw_verification_exception (cfg, bblock, ip);
5351 if (header->code_size == 0)
5354 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5359 if (cfg->method == method)
5360 mono_debug_init_method (cfg, bblock, breakpoint_id);
5362 for (n = 0; n < header->num_locals; ++n) {
5363 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5368 /* add a check for this != NULL to inlined methods */
5369 if (is_virtual_call) {
5372 NEW_ARGLOAD (cfg, arg_ins, 0);
5373 MONO_ADD_INS (cfg->cbb, arg_ins);
5374 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5375 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5376 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5379 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5380 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5383 start_new_bblock = 0;
5387 if (cfg->method == method)
5388 cfg->real_offset = ip - header->code;
5390 cfg->real_offset = inline_offset;
5395 if (start_new_bblock) {
5396 bblock->cil_length = ip - bblock->cil_code;
5397 if (start_new_bblock == 2) {
5398 g_assert (ip == tblock->cil_code);
5400 GET_BBLOCK (cfg, tblock, ip);
5402 bblock->next_bb = tblock;
5405 start_new_bblock = 0;
5406 for (i = 0; i < bblock->in_scount; ++i) {
5407 if (cfg->verbose_level > 3)
5408 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5409 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5413 g_slist_free (class_inits);
5416 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5417 link_bblock (cfg, bblock, tblock);
5418 if (sp != stack_start) {
5419 handle_stack_args (cfg, stack_start, sp - stack_start);
5421 CHECK_UNVERIFIABLE (cfg);
5423 bblock->next_bb = tblock;
5426 for (i = 0; i < bblock->in_scount; ++i) {
5427 if (cfg->verbose_level > 3)
5428 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5429 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5432 g_slist_free (class_inits);
5437 bblock->real_offset = cfg->real_offset;
5439 if ((cfg->method == method) && cfg->coverage_info) {
5440 guint32 cil_offset = ip - header->code;
5441 cfg->coverage_info->data [cil_offset].cil_code = ip;
5443 /* TODO: Use an increment here */
5444 #if defined(__i386__)
5445 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5446 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5448 MONO_ADD_INS (cfg->cbb, ins);
5450 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5451 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5455 if (cfg->verbose_level > 3)
5456 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5460 if (cfg->keep_cil_nops)
5461 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5463 MONO_INST_NEW (cfg, ins, OP_NOP);
5465 MONO_ADD_INS (bblock, ins);
5468 MONO_INST_NEW (cfg, ins, OP_BREAK);
5470 MONO_ADD_INS (bblock, ins);
5476 CHECK_STACK_OVF (1);
5477 n = (*ip)-CEE_LDARG_0;
5479 EMIT_NEW_ARGLOAD (cfg, ins, n);
5487 CHECK_STACK_OVF (1);
5488 n = (*ip)-CEE_LDLOC_0;
5490 EMIT_NEW_LOCLOAD (cfg, ins, n);
5499 n = (*ip)-CEE_STLOC_0;
5502 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5504 emit_stloc_ir (cfg, sp, header, n);
5511 CHECK_STACK_OVF (1);
5514 EMIT_NEW_ARGLOAD (cfg, ins, n);
5520 CHECK_STACK_OVF (1);
5523 NEW_ARGLOADA (cfg, ins, n);
5524 MONO_ADD_INS (cfg->cbb, ins);
5534 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5536 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5541 CHECK_STACK_OVF (1);
5544 EMIT_NEW_LOCLOAD (cfg, ins, n);
5548 case CEE_LDLOCA_S: {
5549 unsigned char *tmp_ip;
5551 CHECK_STACK_OVF (1);
5552 CHECK_LOCAL (ip [1]);
5554 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5560 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5569 CHECK_LOCAL (ip [1]);
5570 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5572 emit_stloc_ir (cfg, sp, header, ip [1]);
5577 CHECK_STACK_OVF (1);
5578 EMIT_NEW_PCONST (cfg, ins, NULL);
5579 ins->type = STACK_OBJ;
5584 CHECK_STACK_OVF (1);
5585 EMIT_NEW_ICONST (cfg, ins, -1);
5598 CHECK_STACK_OVF (1);
5599 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5605 CHECK_STACK_OVF (1);
5607 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5613 CHECK_STACK_OVF (1);
5614 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5620 CHECK_STACK_OVF (1);
5621 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5622 ins->type = STACK_I8;
5623 ins->dreg = alloc_dreg (cfg, STACK_I8);
5625 ins->inst_l = (gint64)read64 (ip);
5626 MONO_ADD_INS (bblock, ins);
5632 /* FIXME: we should really allocate this only late in the compilation process */
5633 mono_domain_lock (cfg->domain);
5634 f = mono_domain_alloc (cfg->domain, sizeof (float));
5635 mono_domain_unlock (cfg->domain);
5637 CHECK_STACK_OVF (1);
5638 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5639 ins->type = STACK_R8;
5640 ins->dreg = alloc_dreg (cfg, STACK_R8);
5644 MONO_ADD_INS (bblock, ins);
5652 /* FIXME: we should really allocate this only late in the compilation process */
5653 mono_domain_lock (cfg->domain);
5654 d = mono_domain_alloc (cfg->domain, sizeof (double));
5655 mono_domain_unlock (cfg->domain);
5657 CHECK_STACK_OVF (1);
5658 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5659 ins->type = STACK_R8;
5660 ins->dreg = alloc_dreg (cfg, STACK_R8);
5664 MONO_ADD_INS (bblock, ins);
5671 MonoInst *temp, *store;
5673 CHECK_STACK_OVF (1);
5677 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5678 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5680 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5683 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5696 if (sp [0]->type == STACK_R8)
5697 /* we need to pop the value from the x86 FP stack */
5698 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5705 if (stack_start != sp)
5707 token = read32 (ip + 1);
5708 /* FIXME: check the signature matches */
5709 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5714 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5715 GENERIC_SHARING_FAILURE (CEE_JMP);
5717 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5718 if (check_linkdemand (cfg, method, cmethod))
5720 CHECK_CFG_EXCEPTION;
5725 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5728 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5730 /* Handle tail calls similarly to calls */
5731 n = fsig->param_count + fsig->hasthis;
5733 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5734 call->method = cmethod;
5735 call->tail_call = TRUE;
5736 call->signature = mono_method_signature (cmethod);
5737 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5738 call->inst.inst_p0 = cmethod;
5739 for (i = 0; i < n; ++i)
5740 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5742 mono_arch_emit_call (cfg, call);
5743 MONO_ADD_INS (bblock, (MonoInst*)call);
5746 for (i = 0; i < num_args; ++i)
5747 /* Prevent arguments from being optimized away */
5748 arg_array [i]->flags |= MONO_INST_VOLATILE;
5750 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5751 ins = (MonoInst*)call;
5752 ins->inst_p0 = cmethod;
5753 MONO_ADD_INS (bblock, ins);
5757 start_new_bblock = 1;
5762 case CEE_CALLVIRT: {
5763 MonoInst *addr = NULL;
5764 MonoMethodSignature *fsig = NULL;
5766 int virtual = *ip == CEE_CALLVIRT;
5767 int calli = *ip == CEE_CALLI;
5768 gboolean pass_imt_from_rgctx = FALSE;
5769 MonoInst *imt_arg = NULL;
5770 gboolean pass_vtable = FALSE;
5771 gboolean pass_mrgctx = FALSE;
5772 MonoInst *vtable_arg = NULL;
5773 gboolean check_this = FALSE;
5776 token = read32 (ip + 1);
5783 if (method->wrapper_type != MONO_WRAPPER_NONE)
5784 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5786 fsig = mono_metadata_parse_signature (image, token);
5788 n = fsig->param_count + fsig->hasthis;
5790 MonoMethod *cil_method;
5792 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5793 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5794 cil_method = cmethod;
5795 } else if (constrained_call) {
5796 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5798 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5799 cil_method = cmethod;
5804 if (!dont_verify && !cfg->skip_visibility) {
5805 MonoMethod *target_method = cil_method;
5806 if (method->is_inflated) {
5807 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5809 if (!mono_method_can_access_method (method_definition, target_method) &&
5810 !mono_method_can_access_method (method, cil_method))
5811 METHOD_ACCESS_FAILURE;
5814 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5815 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5817 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5818 /* MS.NET seems to silently convert this to a callvirt */
5821 if (!cmethod->klass->inited)
5822 if (!mono_class_init (cmethod->klass))
5825 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5826 mini_class_is_system_array (cmethod->klass)) {
5827 array_rank = cmethod->klass->rank;
5828 fsig = mono_method_signature (cmethod);
5830 if (mono_method_signature (cmethod)->pinvoke) {
5831 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5832 check_for_pending_exc, FALSE);
5833 fsig = mono_method_signature (wrapper);
5834 } else if (constrained_call) {
5835 fsig = mono_method_signature (cmethod);
5837 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5841 mono_save_token_info (cfg, image, token, cil_method);
5843 n = fsig->param_count + fsig->hasthis;
5845 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5846 if (check_linkdemand (cfg, method, cmethod))
5848 CHECK_CFG_EXCEPTION;
5851 if (cmethod->string_ctor)
5852 g_assert_not_reached ();
5855 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5858 if (!cfg->generic_sharing_context && cmethod)
5859 g_assert (!mono_method_check_context_used (cmethod));
5863 //g_assert (!virtual || fsig->hasthis);
5867 if (constrained_call) {
5869 * We have the `constrained.' prefix opcode.
5871 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5875 * The type parameter is instantiated as a valuetype,
5876 * but that type doesn't override the method we're
5877 * calling, so we need to box `this'.
5879 dreg = alloc_dreg (cfg, STACK_VTYPE);
5880 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5881 ins->klass = constrained_call;
5882 sp [0] = handle_box (cfg, ins, constrained_call);
5883 } else if (!constrained_call->valuetype) {
5884 int dreg = alloc_preg (cfg);
5887 * The type parameter is instantiated as a reference
5888 * type. We have a managed pointer on the stack, so
5889 * we need to dereference it here.
5891 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5892 ins->type = STACK_OBJ;
5894 } else if (cmethod->klass->valuetype)
5896 constrained_call = NULL;
5899 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5903 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5904 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5905 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5906 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5907 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5910 * Pass vtable iff target method might
5911 * be shared, which means that sharing
5912 * is enabled for its class and its
5913 * context is sharable (and it's not a
5916 if (sharing_enabled && context_sharable &&
5917 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5921 if (cmethod && mini_method_get_context (cmethod) &&
5922 mini_method_get_context (cmethod)->method_inst) {
5923 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5924 MonoGenericContext *context = mini_method_get_context (cmethod);
5925 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5927 g_assert (!pass_vtable);
5929 if (sharing_enabled && context_sharable)
5933 if (cfg->generic_sharing_context && cmethod) {
5934 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5936 context_used = mono_method_check_context_used (cmethod);
5938 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5939 /* Generic method interface
5940 calls are resolved via a
5941 helper function and don't
5943 if (!cmethod_context || !cmethod_context->method_inst)
5944 pass_imt_from_rgctx = TRUE;
5948 * If a shared method calls another
5949 * shared method then the caller must
5950 * have a generic sharing context
5951 * because the magic trampoline
5952 * requires it. FIXME: We shouldn't
5953 * have to force the vtable/mrgctx
5954 * variable here. Instead there
5955 * should be a flag in the cfg to
5956 * request a generic sharing context.
5959 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5960 mono_get_vtable_var (cfg);
5965 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5967 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5969 CHECK_TYPELOAD (cmethod->klass);
5970 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5975 g_assert (!vtable_arg);
5978 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5980 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5983 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5984 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5991 if (pass_imt_from_rgctx) {
5992 g_assert (!pass_vtable);
5995 imt_arg = emit_get_rgctx_method (cfg, context_used,
5996 cmethod, MONO_RGCTX_INFO_METHOD);
6002 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6003 check->sreg1 = sp [0]->dreg;
6004 MONO_ADD_INS (cfg->cbb, check);
6007 /* Calling virtual generic methods */
6008 if (cmethod && virtual &&
6009 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6010 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
6011 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6012 mono_method_signature (cmethod)->generic_param_count) {
6013 MonoInst *this_temp, *this_arg_temp, *store;
6014 MonoInst *iargs [4];
6016 g_assert (mono_method_signature (cmethod)->is_inflated);
6018 /* Prevent inlining of methods that contain indirect calls */
6021 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6022 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6023 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6024 g_assert (!imt_arg);
6026 imt_arg = emit_get_rgctx_method (cfg, context_used,
6027 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6031 cfg->disable_aot = TRUE;
6032 g_assert (cmethod->is_inflated);
6033 EMIT_NEW_PCONST (cfg, imt_arg,
6034 ((MonoMethodInflated*)cmethod)->context.method_inst);
6036 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6040 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6041 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6042 MONO_ADD_INS (bblock, store);
6044 /* FIXME: This should be a managed pointer */
6045 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6047 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6049 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6050 cmethod, MONO_RGCTX_INFO_METHOD);
6051 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6052 addr = mono_emit_jit_icall (cfg,
6053 mono_helper_compile_generic_method, iargs);
6055 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6056 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6057 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6060 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6062 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6065 if (!MONO_TYPE_IS_VOID (fsig->ret))
6074 /* FIXME: runtime generic context pointer for jumps? */
6075 /* FIXME: handle this for generic sharing eventually */
6076 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6077 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6080 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6083 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6084 call->tail_call = TRUE;
6085 call->method = cmethod;
6086 call->signature = mono_method_signature (cmethod);
6089 /* Handle tail calls similarly to calls */
6090 call->inst.opcode = OP_TAILCALL;
6092 mono_arch_emit_call (cfg, call);
6095 * We implement tail calls by storing the actual arguments into the
6096 * argument variables, then emitting a CEE_JMP.
6098 for (i = 0; i < n; ++i) {
6099 /* Prevent argument from being register allocated */
6100 arg_array [i]->flags |= MONO_INST_VOLATILE;
6101 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6105 ins = (MonoInst*)call;
6106 ins->inst_p0 = cmethod;
6107 ins->inst_p1 = arg_array [0];
6108 MONO_ADD_INS (bblock, ins);
6109 link_bblock (cfg, bblock, end_bblock);
6110 start_new_bblock = 1;
6111 /* skip CEE_RET as well */
6117 /* Conversion to a JIT intrinsic */
6118 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6119 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6120 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6131 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6132 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6133 mono_method_check_inlining (cfg, cmethod) &&
6134 !g_list_find (dont_inline, cmethod)) {
6136 gboolean allways = FALSE;
6138 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6139 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6140 /* Prevent inlining of methods that call wrappers */
6142 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6146 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6148 cfg->real_offset += 5;
6151 if (!MONO_TYPE_IS_VOID (fsig->ret))
6152 /* *sp is already set by inline_method */
6155 inline_costs += costs;
6161 inline_costs += 10 * num_calls++;
6163 /* Tail recursion elimination */
6164 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6165 gboolean has_vtargs = FALSE;
6168 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6171 /* keep it simple */
6172 for (i = fsig->param_count - 1; i >= 0; i--) {
6173 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6178 for (i = 0; i < n; ++i)
6179 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6180 MONO_INST_NEW (cfg, ins, OP_BR);
6181 MONO_ADD_INS (bblock, ins);
6182 tblock = start_bblock->out_bb [0];
6183 link_bblock (cfg, bblock, tblock);
6184 ins->inst_target_bb = tblock;
6185 start_new_bblock = 1;
6187 /* skip the CEE_RET, too */
6188 if (ip_in_bb (cfg, bblock, ip + 5))
6198 /* Generic sharing */
6199 /* FIXME: only do this for generic methods if
6200 they are not shared! */
6201 if (context_used && !imt_arg && !array_rank &&
6202 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6203 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6204 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6205 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6208 g_assert (cfg->generic_sharing_context && cmethod);
6212 * We are compiling a call to a
6213 * generic method from shared code,
6214 * which means that we have to look up
6215 * the method in the rgctx and do an
6218 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6221 /* Indirect calls */
6223 g_assert (!imt_arg);
6225 if (*ip == CEE_CALL)
6226 g_assert (context_used);
6227 else if (*ip == CEE_CALLI)
6228 g_assert (!vtable_arg);
6230 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6231 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6233 /* Prevent inlining of methods with indirect calls */
6237 #ifdef MONO_ARCH_RGCTX_REG
6239 int rgctx_reg = mono_alloc_preg (cfg);
6241 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6242 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6243 call = (MonoCallInst*)ins;
6244 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6245 cfg->uses_rgctx_reg = TRUE;
6250 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6252 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6253 if (fsig->pinvoke && !fsig->ret->byref) {
6257 * Native code might return non register sized integers
6258 * without initializing the upper bits.
6260 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6261 case OP_LOADI1_MEMBASE:
6262 widen_op = OP_ICONV_TO_I1;
6264 case OP_LOADU1_MEMBASE:
6265 widen_op = OP_ICONV_TO_U1;
6267 case OP_LOADI2_MEMBASE:
6268 widen_op = OP_ICONV_TO_I2;
6270 case OP_LOADU2_MEMBASE:
6271 widen_op = OP_ICONV_TO_U2;
6277 if (widen_op != -1) {
6278 int dreg = alloc_preg (cfg);
6281 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6282 widen->type = ins->type;
6299 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6300 if (sp [fsig->param_count]->type == STACK_OBJ) {
6301 MonoInst *iargs [2];
6304 iargs [1] = sp [fsig->param_count];
6306 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6309 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6310 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6311 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6312 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6314 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6317 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6318 if (!cmethod->klass->element_class->valuetype && !readonly)
6319 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6322 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6325 g_assert_not_reached ();
6333 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6335 if (!MONO_TYPE_IS_VOID (fsig->ret))
6346 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6348 } else if (imt_arg) {
6349 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6351 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6354 if (!MONO_TYPE_IS_VOID (fsig->ret))
6362 if (cfg->method != method) {
6363 /* return from inlined method */
6365 * If in_count == 0, that means the ret is unreachable due to
6366 * being preceded by a throw. In that case, inline_method () will
6367 * handle setting the return value
6368 * (test case: test_0_inline_throw ()).
6370 if (return_var && cfg->cbb->in_count) {
6374 //g_assert (returnvar != -1);
6375 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6376 cfg->ret_var_set = TRUE;
6380 MonoType *ret_type = mono_method_signature (method)->ret;
6382 g_assert (!return_var);
6385 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6388 if (!cfg->vret_addr) {
6391 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6393 EMIT_NEW_RETLOADA (cfg, ret_addr);
6395 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6396 ins->klass = mono_class_from_mono_type (ret_type);
6399 #ifdef MONO_ARCH_SOFT_FLOAT
6400 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6401 MonoInst *iargs [1];
6405 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6406 mono_arch_emit_setret (cfg, method, conv);
6408 mono_arch_emit_setret (cfg, method, *sp);
6411 mono_arch_emit_setret (cfg, method, *sp);
6416 if (sp != stack_start)
6418 MONO_INST_NEW (cfg, ins, OP_BR);
6420 ins->inst_target_bb = end_bblock;
6421 MONO_ADD_INS (bblock, ins);
6422 link_bblock (cfg, bblock, end_bblock);
6423 start_new_bblock = 1;
6427 MONO_INST_NEW (cfg, ins, OP_BR);
6429 target = ip + 1 + (signed char)(*ip);
6431 GET_BBLOCK (cfg, tblock, target);
6432 link_bblock (cfg, bblock, tblock);
6433 ins->inst_target_bb = tblock;
6434 if (sp != stack_start) {
6435 handle_stack_args (cfg, stack_start, sp - stack_start);
6437 CHECK_UNVERIFIABLE (cfg);
6439 MONO_ADD_INS (bblock, ins);
6440 start_new_bblock = 1;
6441 inline_costs += BRANCH_COST;
6455 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6457 target = ip + 1 + *(signed char*)ip;
6463 inline_costs += BRANCH_COST;
6467 MONO_INST_NEW (cfg, ins, OP_BR);
6470 target = ip + 4 + (gint32)read32(ip);
6472 GET_BBLOCK (cfg, tblock, target);
6473 link_bblock (cfg, bblock, tblock);
6474 ins->inst_target_bb = tblock;
6475 if (sp != stack_start) {
6476 handle_stack_args (cfg, stack_start, sp - stack_start);
6478 CHECK_UNVERIFIABLE (cfg);
6481 MONO_ADD_INS (bblock, ins);
6483 start_new_bblock = 1;
6484 inline_costs += BRANCH_COST;
6491 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6492 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6493 guint32 opsize = is_short ? 1 : 4;
6495 CHECK_OPSIZE (opsize);
6497 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6500 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6505 GET_BBLOCK (cfg, tblock, target);
6506 link_bblock (cfg, bblock, tblock);
6507 GET_BBLOCK (cfg, tblock, ip);
6508 link_bblock (cfg, bblock, tblock);
6510 if (sp != stack_start) {
6511 handle_stack_args (cfg, stack_start, sp - stack_start);
6512 CHECK_UNVERIFIABLE (cfg);
6515 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6516 cmp->sreg1 = sp [0]->dreg;
6517 type_from_op (cmp, sp [0], NULL);
6520 #if SIZEOF_VOID_P == 4
6521 if (cmp->opcode == OP_LCOMPARE_IMM) {
6522 /* Convert it to OP_LCOMPARE */
6523 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6524 ins->type = STACK_I8;
6525 ins->dreg = alloc_dreg (cfg, STACK_I8);
6527 MONO_ADD_INS (bblock, ins);
6528 cmp->opcode = OP_LCOMPARE;
6529 cmp->sreg2 = ins->dreg;
6532 MONO_ADD_INS (bblock, cmp);
6534 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6535 type_from_op (ins, sp [0], NULL);
6536 MONO_ADD_INS (bblock, ins);
6537 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6538 GET_BBLOCK (cfg, tblock, target);
6539 ins->inst_true_bb = tblock;
6540 GET_BBLOCK (cfg, tblock, ip);
6541 ins->inst_false_bb = tblock;
6542 start_new_bblock = 2;
6545 inline_costs += BRANCH_COST;
6560 MONO_INST_NEW (cfg, ins, *ip);
6562 target = ip + 4 + (gint32)read32(ip);
6568 inline_costs += BRANCH_COST;
6572 MonoBasicBlock **targets;
6573 MonoBasicBlock *default_bblock;
6574 MonoJumpInfoBBTable *table;
6576 int offset_reg = alloc_preg (cfg);
6577 int target_reg = alloc_preg (cfg);
6578 int table_reg = alloc_preg (cfg);
6579 int sum_reg = alloc_preg (cfg);
6584 n = read32 (ip + 1);
6587 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6591 CHECK_OPSIZE (n * sizeof (guint32));
6592 target = ip + n * sizeof (guint32);
6594 GET_BBLOCK (cfg, default_bblock, target);
6596 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6597 for (i = 0; i < n; ++i) {
6598 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6599 targets [i] = tblock;
6603 if (sp != stack_start) {
6605 * Link the current bb with the targets as well, so handle_stack_args
6606 * will set their in_stack correctly.
6608 link_bblock (cfg, bblock, default_bblock);
6609 for (i = 0; i < n; ++i)
6610 link_bblock (cfg, bblock, targets [i]);
6612 handle_stack_args (cfg, stack_start, sp - stack_start);
6614 CHECK_UNVERIFIABLE (cfg);
6617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6621 for (i = 0; i < n; ++i)
6622 link_bblock (cfg, bblock, targets [i]);
6624 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6625 table->table = targets;
6626 table->table_size = n;
6629 /* ARM implements SWITCH statements differently */
6630 /* FIXME: Make it use the generic implementation */
6631 /* the backend code will deal with aot vs normal case */
6632 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6633 ins->sreg1 = src1->dreg;
6634 ins->inst_p0 = table;
6635 ins->inst_many_bb = targets;
6636 ins->klass = GUINT_TO_POINTER (n);
6637 MONO_ADD_INS (cfg->cbb, ins);
6639 if (sizeof (gpointer) == 8)
6640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6644 #if SIZEOF_VOID_P == 8
6645 /* The upper word might not be zero, and we add it to a 64 bit address later */
6646 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6649 if (cfg->compile_aot) {
6650 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6652 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6653 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6654 ins->inst_p0 = table;
6655 ins->dreg = table_reg;
6656 MONO_ADD_INS (cfg->cbb, ins);
6659 /* FIXME: Use load_memindex */
6660 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6662 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6664 start_new_bblock = 1;
6665 inline_costs += (BRANCH_COST * 2);
6685 dreg = alloc_freg (cfg);
6688 dreg = alloc_lreg (cfg);
6691 dreg = alloc_preg (cfg);
6694 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6695 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6696 ins->flags |= ins_flag;
6698 MONO_ADD_INS (bblock, ins);
6713 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6714 ins->flags |= ins_flag;
6716 MONO_ADD_INS (bblock, ins);
6724 MONO_INST_NEW (cfg, ins, (*ip));
6726 ins->sreg1 = sp [0]->dreg;
6727 ins->sreg2 = sp [1]->dreg;
6728 type_from_op (ins, sp [0], sp [1]);
6730 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6732 /* Use the immediate opcodes if possible */
6733 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6734 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6735 if (imm_opcode != -1) {
6736 ins->opcode = imm_opcode;
6737 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6740 sp [1]->opcode = OP_NOP;
6744 MONO_ADD_INS ((cfg)->cbb, (ins));
6747 mono_decompose_opcode (cfg, ins);
6764 MONO_INST_NEW (cfg, ins, (*ip));
6766 ins->sreg1 = sp [0]->dreg;
6767 ins->sreg2 = sp [1]->dreg;
6768 type_from_op (ins, sp [0], sp [1]);
6770 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6771 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6773 /* FIXME: Pass opcode to is_inst_imm */
6775 /* Use the immediate opcodes if possible */
6776 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6779 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6780 if (imm_opcode != -1) {
6781 ins->opcode = imm_opcode;
6782 if (sp [1]->opcode == OP_I8CONST) {
6783 #if SIZEOF_VOID_P == 8
6784 ins->inst_imm = sp [1]->inst_l;
6786 ins->inst_ls_word = sp [1]->inst_ls_word;
6787 ins->inst_ms_word = sp [1]->inst_ms_word;
6791 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6794 sp [1]->opcode = OP_NOP;
6797 MONO_ADD_INS ((cfg)->cbb, (ins));
6800 mono_decompose_opcode (cfg, ins);
6813 case CEE_CONV_OVF_I8:
6814 case CEE_CONV_OVF_U8:
6818 /* Special case this earlier so we have long constants in the IR */
6819 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6820 int data = sp [-1]->inst_c0;
6821 sp [-1]->opcode = OP_I8CONST;
6822 sp [-1]->type = STACK_I8;
6823 #if SIZEOF_VOID_P == 8
6824 if ((*ip) == CEE_CONV_U8)
6825 sp [-1]->inst_c0 = (guint32)data;
6827 sp [-1]->inst_c0 = data;
6829 sp [-1]->inst_ls_word = data;
6830 if ((*ip) == CEE_CONV_U8)
6831 sp [-1]->inst_ms_word = 0;
6833 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6835 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6842 case CEE_CONV_OVF_I4:
6843 case CEE_CONV_OVF_I1:
6844 case CEE_CONV_OVF_I2:
6845 case CEE_CONV_OVF_I:
6846 case CEE_CONV_OVF_U:
6849 if (sp [-1]->type == STACK_R8) {
6850 ADD_UNOP (CEE_CONV_OVF_I8);
6857 case CEE_CONV_OVF_U1:
6858 case CEE_CONV_OVF_U2:
6859 case CEE_CONV_OVF_U4:
6862 if (sp [-1]->type == STACK_R8) {
6863 ADD_UNOP (CEE_CONV_OVF_U8);
6870 case CEE_CONV_OVF_I1_UN:
6871 case CEE_CONV_OVF_I2_UN:
6872 case CEE_CONV_OVF_I4_UN:
6873 case CEE_CONV_OVF_I8_UN:
6874 case CEE_CONV_OVF_U1_UN:
6875 case CEE_CONV_OVF_U2_UN:
6876 case CEE_CONV_OVF_U4_UN:
6877 case CEE_CONV_OVF_U8_UN:
6878 case CEE_CONV_OVF_I_UN:
6879 case CEE_CONV_OVF_U_UN:
6889 case CEE_ADD_OVF_UN:
6891 case CEE_MUL_OVF_UN:
6893 case CEE_SUB_OVF_UN:
6901 token = read32 (ip + 1);
6902 klass = mini_get_class (method, token, generic_context);
6903 CHECK_TYPELOAD (klass);
6905 if (generic_class_is_reference_type (cfg, klass)) {
6906 MonoInst *store, *load;
6907 int dreg = alloc_preg (cfg);
6909 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6910 load->flags |= ins_flag;
6911 MONO_ADD_INS (cfg->cbb, load);
6913 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6914 store->flags |= ins_flag;
6915 MONO_ADD_INS (cfg->cbb, store);
6917 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6929 token = read32 (ip + 1);
6930 klass = mini_get_class (method, token, generic_context);
6931 CHECK_TYPELOAD (klass);
6933 /* Optimize the common ldobj+stloc combination */
6943 loc_index = ip [5] - CEE_STLOC_0;
6950 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6951 CHECK_LOCAL (loc_index);
6953 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6954 ins->dreg = cfg->locals [loc_index]->dreg;
6960 /* Optimize the ldobj+stobj combination */
6961 /* The reference case ends up being a load+store anyway */
6962 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6967 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6974 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6983 CHECK_STACK_OVF (1);
6985 n = read32 (ip + 1);
6987 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6988 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6989 ins->type = STACK_OBJ;
6992 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6993 MonoInst *iargs [1];
6995 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6996 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6998 if (cfg->opt & MONO_OPT_SHARED) {
6999 MonoInst *iargs [3];
7001 if (cfg->compile_aot) {
7002 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7004 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7005 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7006 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7007 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7008 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7010 if (bblock->out_of_line) {
7011 MonoInst *iargs [2];
7013 if (cfg->method->klass->image == mono_defaults.corlib) {
7015 * Avoid relocations in AOT and save some space by using a
7016 * version of helper_ldstr specialized to mscorlib.
7018 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7019 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7021 /* Avoid creating the string object */
7022 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7023 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7024 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7028 if (cfg->compile_aot) {
7029 NEW_LDSTRCONST (cfg, ins, image, n);
7031 MONO_ADD_INS (bblock, ins);
7034 NEW_PCONST (cfg, ins, NULL);
7035 ins->type = STACK_OBJ;
7036 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7038 MONO_ADD_INS (bblock, ins);
7047 MonoInst *iargs [2];
7048 MonoMethodSignature *fsig;
7051 MonoInst *vtable_arg = NULL;
7054 token = read32 (ip + 1);
7055 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7058 fsig = mono_method_get_signature (cmethod, image, token);
7060 mono_save_token_info (cfg, image, token, cmethod);
7062 if (!mono_class_init (cmethod->klass))
7065 if (cfg->generic_sharing_context)
7066 context_used = mono_method_check_context_used (cmethod);
7068 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7069 if (check_linkdemand (cfg, method, cmethod))
7071 CHECK_CFG_EXCEPTION;
7072 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7073 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7076 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7077 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7078 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7080 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7081 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7083 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7087 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7088 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7090 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7092 CHECK_TYPELOAD (cmethod->klass);
7093 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7098 n = fsig->param_count;
7102 * Generate smaller code for the common newobj <exception> instruction in
7103 * argument checking code.
7105 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7106 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7107 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7108 MonoInst *iargs [3];
7110 g_assert (!vtable_arg);
7114 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7117 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7121 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7126 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7129 g_assert_not_reached ();
7137 /* move the args to allow room for 'this' in the first position */
7143 /* check_call_signature () requires sp[0] to be set */
7144 this_ins.type = STACK_OBJ;
7146 if (check_call_signature (cfg, fsig, sp))
7151 if (mini_class_is_system_array (cmethod->klass)) {
7153 GENERIC_SHARING_FAILURE (*ip);
7154 g_assert (!context_used);
7155 g_assert (!vtable_arg);
7156 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7158 /* Avoid varargs in the common case */
7159 if (fsig->param_count == 1)
7160 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7161 else if (fsig->param_count == 2)
7162 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7164 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7165 } else if (cmethod->string_ctor) {
7166 g_assert (!context_used);
7167 g_assert (!vtable_arg);
7168 /* we simply pass a null pointer */
7169 EMIT_NEW_PCONST (cfg, *sp, NULL);
7170 /* now call the string ctor */
7171 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7173 MonoInst* callvirt_this_arg = NULL;
7175 if (cmethod->klass->valuetype) {
7176 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7177 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7178 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7183 * The code generated by mini_emit_virtual_call () expects
7184 * iargs [0] to be a boxed instance, but luckily the vcall
7185 * will be transformed into a normal call there.
7187 } else if (context_used) {
7191 if (cfg->opt & MONO_OPT_SHARED)
7192 rgctx_info = MONO_RGCTX_INFO_KLASS;
7194 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7195 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7197 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7200 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7202 CHECK_TYPELOAD (cmethod->klass);
7205 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7206 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7207 * As a workaround, we call class cctors before allocating objects.
7209 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7210 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7211 if (cfg->verbose_level > 2)
7212 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7213 class_inits = g_slist_prepend (class_inits, vtable);
7216 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7221 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7223 /* Now call the actual ctor */
7224 /* Avoid virtual calls to ctors if possible */
7225 if (cmethod->klass->marshalbyref)
7226 callvirt_this_arg = sp [0];
7228 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7229 mono_method_check_inlining (cfg, cmethod) &&
7230 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7231 !g_list_find (dont_inline, cmethod)) {
7234 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7235 cfg->real_offset += 5;
7238 inline_costs += costs - 5;
7241 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7243 } else if (context_used &&
7244 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7245 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7246 MonoInst *cmethod_addr;
7248 g_assert (!callvirt_this_arg);
7250 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7251 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7253 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7256 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7257 callvirt_this_arg, NULL, vtable_arg);
7261 if (alloc == NULL) {
7263 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7264 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7278 token = read32 (ip + 1);
7279 klass = mini_get_class (method, token, generic_context);
7280 CHECK_TYPELOAD (klass);
7281 if (sp [0]->type != STACK_OBJ)
7284 if (cfg->generic_sharing_context)
7285 context_used = mono_class_check_context_used (klass);
7294 args [1] = emit_get_rgctx_klass (cfg, context_used,
7295 klass, MONO_RGCTX_INFO_KLASS);
7297 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7301 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7302 MonoMethod *mono_castclass;
7303 MonoInst *iargs [1];
7306 mono_castclass = mono_marshal_get_castclass (klass);
7309 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7310 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7311 g_assert (costs > 0);
7314 cfg->real_offset += 5;
7319 inline_costs += costs;
7322 ins = handle_castclass (cfg, klass, *sp);
7332 token = read32 (ip + 1);
7333 klass = mini_get_class (method, token, generic_context);
7334 CHECK_TYPELOAD (klass);
7335 if (sp [0]->type != STACK_OBJ)
7338 if (cfg->generic_sharing_context)
7339 context_used = mono_class_check_context_used (klass);
7348 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7350 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7354 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7355 MonoMethod *mono_isinst;
7356 MonoInst *iargs [1];
7359 mono_isinst = mono_marshal_get_isinst (klass);
7362 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7363 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7364 g_assert (costs > 0);
7367 cfg->real_offset += 5;
7372 inline_costs += costs;
7375 ins = handle_isinst (cfg, klass, *sp);
7382 case CEE_UNBOX_ANY: {
7386 token = read32 (ip + 1);
7387 klass = mini_get_class (method, token, generic_context);
7388 CHECK_TYPELOAD (klass);
7390 mono_save_token_info (cfg, image, token, klass);
7392 if (cfg->generic_sharing_context)
7393 context_used = mono_class_check_context_used (klass);
7395 if (generic_class_is_reference_type (cfg, klass)) {
7398 MonoInst *iargs [2];
7403 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7404 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7408 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7409 MonoMethod *mono_castclass;
7410 MonoInst *iargs [1];
7413 mono_castclass = mono_marshal_get_castclass (klass);
7416 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7417 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7419 g_assert (costs > 0);
7422 cfg->real_offset += 5;
7426 inline_costs += costs;
7428 ins = handle_castclass (cfg, klass, *sp);
7436 if (mono_class_is_nullable (klass)) {
7437 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7444 ins = handle_unbox (cfg, klass, sp, context_used);
7450 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7463 token = read32 (ip + 1);
7464 klass = mini_get_class (method, token, generic_context);
7465 CHECK_TYPELOAD (klass);
7467 mono_save_token_info (cfg, image, token, klass);
7469 if (cfg->generic_sharing_context)
7470 context_used = mono_class_check_context_used (klass);
7472 if (generic_class_is_reference_type (cfg, klass)) {
7478 if (klass == mono_defaults.void_class)
7480 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7482 /* frequent check in generic code: box (struct), brtrue */
7483 if (!mono_class_is_nullable (klass) &&
7484 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7485 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7487 MONO_INST_NEW (cfg, ins, OP_BR);
7488 if (*ip == CEE_BRTRUE_S) {
7491 target = ip + 1 + (signed char)(*ip);
7496 target = ip + 4 + (gint)(read32 (ip));
7499 GET_BBLOCK (cfg, tblock, target);
7500 link_bblock (cfg, bblock, tblock);
7501 ins->inst_target_bb = tblock;
7502 GET_BBLOCK (cfg, tblock, ip);
7504 * This leads to some inconsistency, since the two bblocks are not
7505 * really connected, but it is needed for handling stack arguments
7506 * correct (See test_0_box_brtrue_opt_regress_81102).
7508 link_bblock (cfg, bblock, tblock);
7509 if (sp != stack_start) {
7510 handle_stack_args (cfg, stack_start, sp - stack_start);
7512 CHECK_UNVERIFIABLE (cfg);
7514 MONO_ADD_INS (bblock, ins);
7515 start_new_bblock = 1;
7523 if (cfg->opt & MONO_OPT_SHARED)
7524 rgctx_info = MONO_RGCTX_INFO_KLASS;
7526 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7527 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7528 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7530 *sp++ = handle_box (cfg, val, klass);
7541 token = read32 (ip + 1);
7542 klass = mini_get_class (method, token, generic_context);
7543 CHECK_TYPELOAD (klass);
7545 mono_save_token_info (cfg, image, token, klass);
7547 if (cfg->generic_sharing_context)
7548 context_used = mono_class_check_context_used (klass);
7550 if (mono_class_is_nullable (klass)) {
7553 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7554 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7558 ins = handle_unbox (cfg, klass, sp, context_used);
7568 MonoClassField *field;
7572 if (*ip == CEE_STFLD) {
7579 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7581 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7584 token = read32 (ip + 1);
7585 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7586 field = mono_method_get_wrapper_data (method, token);
7587 klass = field->parent;
7590 field = mono_field_from_token (image, token, &klass, generic_context);
7594 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7595 FIELD_ACCESS_FAILURE;
7596 mono_class_init (klass);
7598 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7599 if (*ip == CEE_STFLD) {
7600 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7602 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7603 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7604 MonoInst *iargs [5];
7607 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7608 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7609 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7613 if (cfg->opt & MONO_OPT_INLINE) {
7614 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7615 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7616 g_assert (costs > 0);
7618 cfg->real_offset += 5;
7621 inline_costs += costs;
7623 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7628 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7630 store->flags |= ins_flag;
7637 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7638 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7639 MonoInst *iargs [4];
7642 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7643 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7644 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7645 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7646 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7647 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7649 g_assert (costs > 0);
7651 cfg->real_offset += 5;
7655 inline_costs += costs;
7657 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7661 if (sp [0]->type == STACK_VTYPE) {
7664 /* Have to compute the address of the variable */
7666 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7668 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7670 g_assert (var->klass == klass);
7672 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7676 if (*ip == CEE_LDFLDA) {
7677 dreg = alloc_preg (cfg);
7679 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7680 ins->klass = mono_class_from_mono_type (field->type);
7681 ins->type = STACK_MP;
7686 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7687 load->flags |= ins_flag;
7698 MonoClassField *field;
7699 gpointer addr = NULL;
7700 gboolean is_special_static;
7703 token = read32 (ip + 1);
7705 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7706 field = mono_method_get_wrapper_data (method, token);
7707 klass = field->parent;
7710 field = mono_field_from_token (image, token, &klass, generic_context);
7713 mono_class_init (klass);
7714 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7715 FIELD_ACCESS_FAILURE;
7718 * We can only support shared generic static
7719 * field access on architectures where the
7720 * trampoline code has been extended to handle
7721 * the generic class init.
7723 #ifndef MONO_ARCH_VTABLE_REG
7724 GENERIC_SHARING_FAILURE (*ip);
7727 if (cfg->generic_sharing_context)
7728 context_used = mono_class_check_context_used (klass);
7730 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7732 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7733 * to be called here.
7735 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7736 mono_class_vtable (cfg->domain, klass);
7737 CHECK_TYPELOAD (klass);
7739 mono_domain_lock (cfg->domain);
7740 if (cfg->domain->special_static_fields)
7741 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7742 mono_domain_unlock (cfg->domain);
7744 is_special_static = mono_class_field_is_special_static (field);
7746 /* Generate IR to compute the field address */
7748 if ((cfg->opt & MONO_OPT_SHARED) ||
7749 (cfg->compile_aot && is_special_static) ||
7750 (context_used && is_special_static)) {
7751 MonoInst *iargs [2];
7753 g_assert (field->parent);
7754 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7756 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7757 field, MONO_RGCTX_INFO_CLASS_FIELD);
7759 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7761 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7762 } else if (context_used) {
7763 MonoInst *static_data;
7766 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7767 method->klass->name_space, method->klass->name, method->name,
7768 depth, field->offset);
7771 if (mono_class_needs_cctor_run (klass, method)) {
7775 vtable = emit_get_rgctx_klass (cfg, context_used,
7776 klass, MONO_RGCTX_INFO_VTABLE);
7778 // FIXME: This doesn't work since it tries to pass the argument
7779 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7781 * The vtable pointer is always passed in a register regardless of
7782 * the calling convention, so assign it manually, and make a call
7783 * using a signature without parameters.
7785 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7786 #ifdef MONO_ARCH_VTABLE_REG
7787 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7788 cfg->uses_vtable_reg = TRUE;
7795 * The pointer we're computing here is
7797 * super_info.static_data + field->offset
7799 static_data = emit_get_rgctx_klass (cfg, context_used,
7800 klass, MONO_RGCTX_INFO_STATIC_DATA);
7802 if (field->offset == 0) {
7805 int addr_reg = mono_alloc_preg (cfg);
7806 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7808 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7809 MonoInst *iargs [2];
7811 g_assert (field->parent);
7812 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7813 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7814 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7816 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7818 CHECK_TYPELOAD (klass);
7820 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7821 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7822 if (cfg->verbose_level > 2)
7823 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7824 class_inits = g_slist_prepend (class_inits, vtable);
7826 if (cfg->run_cctors) {
7828 /* This makes so that inline cannot trigger */
7829 /* .cctors: too many apps depend on them */
7830 /* running with a specific order... */
7831 if (! vtable->initialized)
7833 ex = mono_runtime_class_init_full (vtable, FALSE);
7835 set_exception_object (cfg, ex);
7836 goto exception_exit;
7840 addr = (char*)vtable->data + field->offset;
7842 if (cfg->compile_aot)
7843 EMIT_NEW_SFLDACONST (cfg, ins, field);
7845 EMIT_NEW_PCONST (cfg, ins, addr);
7848 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7849 * This could be later optimized to do just a couple of
7850 * memory dereferences with constant offsets.
7852 MonoInst *iargs [1];
7853 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7854 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7858 /* Generate IR to do the actual load/store operation */
7860 if (*ip == CEE_LDSFLDA) {
7861 ins->klass = mono_class_from_mono_type (field->type);
7863 } else if (*ip == CEE_STSFLD) {
7868 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7869 store->flags |= ins_flag;
7871 gboolean is_const = FALSE;
7872 MonoVTable *vtable = NULL;
7874 if (!context_used) {
7875 vtable = mono_class_vtable (cfg->domain, klass);
7876 CHECK_TYPELOAD (klass);
7878 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7879 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7880 gpointer addr = (char*)vtable->data + field->offset;
7881 int ro_type = field->type->type;
7882 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7883 ro_type = field->type->data.klass->enum_basetype->type;
7885 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7888 case MONO_TYPE_BOOLEAN:
7890 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7894 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7897 case MONO_TYPE_CHAR:
7899 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7903 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7908 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7912 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7917 case MONO_TYPE_STRING:
7918 case MONO_TYPE_OBJECT:
7919 case MONO_TYPE_CLASS:
7920 case MONO_TYPE_SZARRAY:
7922 case MONO_TYPE_FNPTR:
7923 case MONO_TYPE_ARRAY:
7924 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7925 type_to_eval_stack_type ((cfg), field->type, *sp);
7930 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7935 case MONO_TYPE_VALUETYPE:
7945 CHECK_STACK_OVF (1);
7947 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7948 load->flags |= ins_flag;
7961 token = read32 (ip + 1);
7962 klass = mini_get_class (method, token, generic_context);
7963 CHECK_TYPELOAD (klass);
7964 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7965 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7976 const char *data_ptr;
7983 token = read32 (ip + 1);
7985 klass = mini_get_class (method, token, generic_context);
7986 CHECK_TYPELOAD (klass);
7988 if (cfg->generic_sharing_context)
7989 context_used = mono_class_check_context_used (klass);
7994 /* FIXME: Decompose later to help abcrem */
7997 args [0] = emit_get_rgctx_klass (cfg, context_used,
7998 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8003 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8005 if (cfg->opt & MONO_OPT_SHARED) {
8006 /* Decompose now to avoid problems with references to the domainvar */
8007 MonoInst *iargs [3];
8009 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8010 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8013 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8015 /* Decompose later since it is needed by abcrem */
8016 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8017 ins->dreg = alloc_preg (cfg);
8018 ins->sreg1 = sp [0]->dreg;
8019 ins->inst_newa_class = klass;
8020 ins->type = STACK_OBJ;
8022 MONO_ADD_INS (cfg->cbb, ins);
8023 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8024 cfg->cbb->has_array_access = TRUE;
8026 /* Needed so mono_emit_load_get_addr () gets called */
8027 mono_get_got_var (cfg);
8037 * we inline/optimize the initialization sequence if possible.
8038 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8039 * for small sizes open code the memcpy
8040 * ensure the rva field is big enough
8042 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
8043 MonoMethod *memcpy_method = get_memcpy_method ();
8044 MonoInst *iargs [3];
8045 int add_reg = alloc_preg (cfg);
8047 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8048 if (cfg->compile_aot) {
8049 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
8051 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8053 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8054 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8063 if (sp [0]->type != STACK_OBJ)
8066 dreg = alloc_preg (cfg);
8067 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8068 ins->dreg = alloc_preg (cfg);
8069 ins->sreg1 = sp [0]->dreg;
8070 ins->type = STACK_I4;
8071 MONO_ADD_INS (cfg->cbb, ins);
8072 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8073 cfg->cbb->has_array_access = TRUE;
8081 if (sp [0]->type != STACK_OBJ)
8084 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8086 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8087 CHECK_TYPELOAD (klass);
8088 /* we need to make sure that this array is exactly the type it needs
8089 * to be for correctness. the wrappers are lax with their usage
8090 * so we need to ignore them here
8092 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8093 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8096 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8100 case CEE_LDELEM_ANY:
8111 case CEE_LDELEM_REF: {
8117 if (*ip == CEE_LDELEM_ANY) {
8119 token = read32 (ip + 1);
8120 klass = mini_get_class (method, token, generic_context);
8121 CHECK_TYPELOAD (klass);
8122 mono_class_init (klass);
8125 klass = array_access_to_klass (*ip);
8127 if (sp [0]->type != STACK_OBJ)
8130 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8132 if (sp [1]->opcode == OP_ICONST) {
8133 int array_reg = sp [0]->dreg;
8134 int index_reg = sp [1]->dreg;
8135 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8137 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8138 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8140 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8141 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8144 if (*ip == CEE_LDELEM_ANY)
8157 case CEE_STELEM_REF:
8158 case CEE_STELEM_ANY: {
8164 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8166 if (*ip == CEE_STELEM_ANY) {
8168 token = read32 (ip + 1);
8169 klass = mini_get_class (method, token, generic_context);
8170 CHECK_TYPELOAD (klass);
8171 mono_class_init (klass);
8174 klass = array_access_to_klass (*ip);
8176 if (sp [0]->type != STACK_OBJ)
8179 /* storing a NULL doesn't need any of the complex checks in stelemref */
8180 if (generic_class_is_reference_type (cfg, klass) &&
8181 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8182 MonoMethod* helper = mono_marshal_get_stelemref ();
8183 MonoInst *iargs [3];
8185 if (sp [0]->type != STACK_OBJ)
8187 if (sp [2]->type != STACK_OBJ)
8194 mono_emit_method_call (cfg, helper, iargs, NULL);
8196 if (sp [1]->opcode == OP_ICONST) {
8197 int array_reg = sp [0]->dreg;
8198 int index_reg = sp [1]->dreg;
8199 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8201 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8204 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8205 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8209 if (*ip == CEE_STELEM_ANY)
8216 case CEE_CKFINITE: {
8220 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8221 ins->sreg1 = sp [0]->dreg;
8222 ins->dreg = alloc_freg (cfg);
8223 ins->type = STACK_R8;
8224 MONO_ADD_INS (bblock, ins);
8227 mono_decompose_opcode (cfg, ins);
8232 case CEE_REFANYVAL: {
8233 MonoInst *src_var, *src;
8235 int klass_reg = alloc_preg (cfg);
8236 int dreg = alloc_preg (cfg);
8239 MONO_INST_NEW (cfg, ins, *ip);
8242 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8243 CHECK_TYPELOAD (klass);
8244 mono_class_init (klass);
8246 if (cfg->generic_sharing_context)
8247 context_used = mono_class_check_context_used (klass);
8250 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8252 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8253 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8254 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8257 MonoInst *klass_ins;
8259 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8260 klass, MONO_RGCTX_INFO_KLASS);
8263 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8264 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8266 mini_emit_class_check (cfg, klass_reg, klass);
8268 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8269 ins->type = STACK_MP;
8274 case CEE_MKREFANY: {
8275 MonoInst *loc, *addr;
8278 MONO_INST_NEW (cfg, ins, *ip);
8281 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8282 CHECK_TYPELOAD (klass);
8283 mono_class_init (klass);
8285 if (cfg->generic_sharing_context)
8286 context_used = mono_class_check_context_used (klass);
8288 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8289 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8292 MonoInst *const_ins;
8293 int type_reg = alloc_preg (cfg);
8295 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8297 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8298 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8299 } else if (cfg->compile_aot) {
8300 int const_reg = alloc_preg (cfg);
8301 int type_reg = alloc_preg (cfg);
8303 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8304 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8305 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8306 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8308 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8309 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8311 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8313 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8314 ins->type = STACK_VTYPE;
8315 ins->klass = mono_defaults.typed_reference_class;
8322 MonoClass *handle_class;
8324 CHECK_STACK_OVF (1);
8327 n = read32 (ip + 1);
8329 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8330 handle = mono_method_get_wrapper_data (method, n);
8331 handle_class = mono_method_get_wrapper_data (method, n + 1);
8332 if (handle_class == mono_defaults.typehandle_class)
8333 handle = &((MonoClass*)handle)->byval_arg;
8336 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8340 mono_class_init (handle_class);
8341 if (cfg->generic_sharing_context) {
8342 if (handle_class == mono_defaults.typehandle_class) {
8343 /* If we get a MONO_TYPE_CLASS
8344 then we need to provide the
8346 instantiation of it. */
8347 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8350 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8351 } else if (handle_class == mono_defaults.fieldhandle_class)
8352 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8353 else if (handle_class == mono_defaults.methodhandle_class)
8354 context_used = mono_method_check_context_used (handle);
8356 g_assert_not_reached ();
8359 if (cfg->opt & MONO_OPT_SHARED) {
8360 MonoInst *addr, *vtvar, *iargs [3];
8361 int method_context_used;
8363 if (cfg->generic_sharing_context)
8364 method_context_used = mono_method_check_context_used (method);
8366 method_context_used = 0;
8368 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8370 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8371 EMIT_NEW_ICONST (cfg, iargs [1], n);
8372 if (method_context_used) {
8373 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8374 method, MONO_RGCTX_INFO_METHOD);
8375 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8377 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8378 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8380 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8382 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8384 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8386 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8387 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8388 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8389 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8390 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8391 MonoClass *tclass = mono_class_from_mono_type (handle);
8393 mono_class_init (tclass);
8395 ins = emit_get_rgctx_klass (cfg, context_used,
8396 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8397 } else if (cfg->compile_aot) {
8398 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8400 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8402 ins->type = STACK_OBJ;
8403 ins->klass = cmethod->klass;
8406 MonoInst *addr, *vtvar;
8408 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8411 if (handle_class == mono_defaults.typehandle_class) {
8412 ins = emit_get_rgctx_klass (cfg, context_used,
8413 mono_class_from_mono_type (handle),
8414 MONO_RGCTX_INFO_TYPE);
8415 } else if (handle_class == mono_defaults.methodhandle_class) {
8416 ins = emit_get_rgctx_method (cfg, context_used,
8417 handle, MONO_RGCTX_INFO_METHOD);
8418 } else if (handle_class == mono_defaults.fieldhandle_class) {
8419 ins = emit_get_rgctx_field (cfg, context_used,
8420 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8422 g_assert_not_reached ();
8424 } else if (cfg->compile_aot) {
8425 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8427 EMIT_NEW_PCONST (cfg, ins, handle);
8429 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8430 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8431 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8441 MONO_INST_NEW (cfg, ins, OP_THROW);
8443 ins->sreg1 = sp [0]->dreg;
8445 bblock->out_of_line = TRUE;
8446 MONO_ADD_INS (bblock, ins);
8447 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8448 MONO_ADD_INS (bblock, ins);
8451 link_bblock (cfg, bblock, end_bblock);
8452 start_new_bblock = 1;
8454 case CEE_ENDFINALLY:
8455 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8456 MONO_ADD_INS (bblock, ins);
8458 start_new_bblock = 1;
8461 * Control will leave the method so empty the stack, otherwise
8462 * the next basic block will start with a nonempty stack.
8464 while (sp != stack_start) {
8472 if (*ip == CEE_LEAVE) {
8474 target = ip + 5 + (gint32)read32(ip + 1);
8477 target = ip + 2 + (signed char)(ip [1]);
8480 /* empty the stack */
8481 while (sp != stack_start) {
8486 * If this leave statement is in a catch block, check for a
8487 * pending exception, and rethrow it if necessary.
8489 for (i = 0; i < header->num_clauses; ++i) {
8490 MonoExceptionClause *clause = &header->clauses [i];
8493 * Use <= in the final comparison to handle clauses with multiple
8494 * leave statements, like in bug #78024.
8495 * The ordering of the exception clauses guarantees that we find the
8498 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8500 MonoBasicBlock *dont_throw;
8505 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8508 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8510 NEW_BBLOCK (cfg, dont_throw);
8513 * Currently, we always rethrow the abort exception, despite the
8514 * fact that this is not correct. See thread6.cs for an example.
8515 * But propagating the abort exception is more important than
8516 * getting the semantics right.
8518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8520 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8522 MONO_START_BB (cfg, dont_throw);
8527 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8529 for (tmp = handlers; tmp; tmp = tmp->next) {
8531 link_bblock (cfg, bblock, tblock);
8532 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8533 ins->inst_target_bb = tblock;
8534 MONO_ADD_INS (bblock, ins);
8536 g_list_free (handlers);
8539 MONO_INST_NEW (cfg, ins, OP_BR);
8540 MONO_ADD_INS (bblock, ins);
8541 GET_BBLOCK (cfg, tblock, target);
8542 link_bblock (cfg, bblock, tblock);
8543 ins->inst_target_bb = tblock;
8544 start_new_bblock = 1;
8546 if (*ip == CEE_LEAVE)
8555 * Mono specific opcodes
8557 case MONO_CUSTOM_PREFIX: {
8559 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8563 case CEE_MONO_ICALL: {
8565 MonoJitICallInfo *info;
8567 token = read32 (ip + 2);
8568 func = mono_method_get_wrapper_data (method, token);
8569 info = mono_find_jit_icall_by_addr (func);
8572 CHECK_STACK (info->sig->param_count);
8573 sp -= info->sig->param_count;
8575 ins = mono_emit_jit_icall (cfg, info->func, sp);
8576 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8580 inline_costs += 10 * num_calls++;
8584 case CEE_MONO_LDPTR: {
8587 CHECK_STACK_OVF (1);
8589 token = read32 (ip + 2);
8591 ptr = mono_method_get_wrapper_data (method, token);
8592 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8593 MonoJitICallInfo *callinfo;
8594 const char *icall_name;
8596 icall_name = method->name + strlen ("__icall_wrapper_");
8597 g_assert (icall_name);
8598 callinfo = mono_find_jit_icall_by_name (icall_name);
8599 g_assert (callinfo);
8601 if (ptr == callinfo->func) {
8602 /* Will be transformed into an AOTCONST later */
8603 EMIT_NEW_PCONST (cfg, ins, ptr);
8609 /* FIXME: Generalize this */
8610 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8611 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8616 EMIT_NEW_PCONST (cfg, ins, ptr);
8619 inline_costs += 10 * num_calls++;
8620 /* Can't embed random pointers into AOT code */
8621 cfg->disable_aot = 1;
8624 case CEE_MONO_ICALL_ADDR: {
8625 MonoMethod *cmethod;
8628 CHECK_STACK_OVF (1);
8630 token = read32 (ip + 2);
8632 cmethod = mono_method_get_wrapper_data (method, token);
8634 if (cfg->compile_aot) {
8635 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8637 ptr = mono_lookup_internal_call (cmethod);
8639 EMIT_NEW_PCONST (cfg, ins, ptr);
8645 case CEE_MONO_VTADDR: {
8646 MonoInst *src_var, *src;
8652 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8653 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8658 case CEE_MONO_NEWOBJ: {
8659 MonoInst *iargs [2];
8661 CHECK_STACK_OVF (1);
8663 token = read32 (ip + 2);
8664 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8665 mono_class_init (klass);
8666 NEW_DOMAINCONST (cfg, iargs [0]);
8667 MONO_ADD_INS (cfg->cbb, iargs [0]);
8668 NEW_CLASSCONST (cfg, iargs [1], klass);
8669 MONO_ADD_INS (cfg->cbb, iargs [1]);
8670 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8672 inline_costs += 10 * num_calls++;
8675 case CEE_MONO_OBJADDR:
8678 MONO_INST_NEW (cfg, ins, OP_MOVE);
8679 ins->dreg = alloc_preg (cfg);
8680 ins->sreg1 = sp [0]->dreg;
8681 ins->type = STACK_MP;
8682 MONO_ADD_INS (cfg->cbb, ins);
8686 case CEE_MONO_LDNATIVEOBJ:
8688 * Similar to LDOBJ, but instead load the unmanaged
8689 * representation of the vtype to the stack.
8694 token = read32 (ip + 2);
8695 klass = mono_method_get_wrapper_data (method, token);
8696 g_assert (klass->valuetype);
8697 mono_class_init (klass);
8700 MonoInst *src, *dest, *temp;
8703 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8704 temp->backend.is_pinvoke = 1;
8705 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8706 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8708 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8709 dest->type = STACK_VTYPE;
8710 dest->klass = klass;
8716 case CEE_MONO_RETOBJ: {
8718 * Same as RET, but return the native representation of a vtype
8721 g_assert (cfg->ret);
8722 g_assert (mono_method_signature (method)->pinvoke);
8727 token = read32 (ip + 2);
8728 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8730 if (!cfg->vret_addr) {
8731 g_assert (cfg->ret_var_is_local);
8733 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8735 EMIT_NEW_RETLOADA (cfg, ins);
8737 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8739 if (sp != stack_start)
8742 MONO_INST_NEW (cfg, ins, OP_BR);
8743 ins->inst_target_bb = end_bblock;
8744 MONO_ADD_INS (bblock, ins);
8745 link_bblock (cfg, bblock, end_bblock);
8746 start_new_bblock = 1;
8750 case CEE_MONO_CISINST:
8751 case CEE_MONO_CCASTCLASS: {
8756 token = read32 (ip + 2);
8757 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8758 if (ip [1] == CEE_MONO_CISINST)
8759 ins = handle_cisinst (cfg, klass, sp [0]);
8761 ins = handle_ccastclass (cfg, klass, sp [0]);
8767 case CEE_MONO_SAVE_LMF:
8768 case CEE_MONO_RESTORE_LMF:
8769 #ifdef MONO_ARCH_HAVE_LMF_OPS
8770 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8771 MONO_ADD_INS (bblock, ins);
8772 cfg->need_lmf_area = TRUE;
8776 case CEE_MONO_CLASSCONST:
8777 CHECK_STACK_OVF (1);
8779 token = read32 (ip + 2);
8780 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8783 inline_costs += 10 * num_calls++;
8785 case CEE_MONO_NOT_TAKEN:
8786 bblock->out_of_line = TRUE;
8790 CHECK_STACK_OVF (1);
8792 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8793 ins->dreg = alloc_preg (cfg);
8794 ins->inst_offset = (gint32)read32 (ip + 2);
8795 ins->type = STACK_PTR;
8796 MONO_ADD_INS (bblock, ins);
8801 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8811 /* somewhat similar to LDTOKEN */
8812 MonoInst *addr, *vtvar;
8813 CHECK_STACK_OVF (1);
8814 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8816 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8817 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8819 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8820 ins->type = STACK_VTYPE;
8821 ins->klass = mono_defaults.argumenthandle_class;
8834 * The following transforms:
8835 * CEE_CEQ into OP_CEQ
8836 * CEE_CGT into OP_CGT
8837 * CEE_CGT_UN into OP_CGT_UN
8838 * CEE_CLT into OP_CLT
8839 * CEE_CLT_UN into OP_CLT_UN
8841 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8843 MONO_INST_NEW (cfg, ins, cmp->opcode);
8845 cmp->sreg1 = sp [0]->dreg;
8846 cmp->sreg2 = sp [1]->dreg;
8847 type_from_op (cmp, sp [0], sp [1]);
8849 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8850 cmp->opcode = OP_LCOMPARE;
8851 else if (sp [0]->type == STACK_R8)
8852 cmp->opcode = OP_FCOMPARE;
8854 cmp->opcode = OP_ICOMPARE;
8855 MONO_ADD_INS (bblock, cmp);
8856 ins->type = STACK_I4;
8857 ins->dreg = alloc_dreg (cfg, ins->type);
8858 type_from_op (ins, sp [0], sp [1]);
8860 if (cmp->opcode == OP_FCOMPARE) {
8862 * The backends expect the fceq opcodes to do the
8865 cmp->opcode = OP_NOP;
8866 ins->sreg1 = cmp->sreg1;
8867 ins->sreg2 = cmp->sreg2;
8869 MONO_ADD_INS (bblock, ins);
8876 MonoMethod *cil_method, *ctor_method;
8877 gboolean needs_static_rgctx_invoke;
8879 CHECK_STACK_OVF (1);
8881 n = read32 (ip + 2);
8882 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8885 mono_class_init (cmethod->klass);
8887 mono_save_token_info (cfg, image, n, cmethod);
8889 if (cfg->generic_sharing_context)
8890 context_used = mono_method_check_context_used (cmethod);
8892 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8894 cil_method = cmethod;
8895 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8896 METHOD_ACCESS_FAILURE;
8898 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8899 if (check_linkdemand (cfg, method, cmethod))
8901 CHECK_CFG_EXCEPTION;
8902 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8903 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8907 * Optimize the common case of ldftn+delegate creation
8909 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8910 /* FIXME: SGEN support */
8911 /* FIXME: handle shared static generic methods */
8912 /* FIXME: handle this in shared code */
8913 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8914 MonoInst *target_ins;
8917 if (cfg->verbose_level > 3)
8918 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8919 target_ins = sp [-1];
8921 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8929 if (needs_static_rgctx_invoke)
8930 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8932 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8933 } else if (needs_static_rgctx_invoke) {
8934 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8936 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8938 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8942 inline_costs += 10 * num_calls++;
8945 case CEE_LDVIRTFTN: {
8950 n = read32 (ip + 2);
8951 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8954 mono_class_init (cmethod->klass);
8956 if (cfg->generic_sharing_context)
8957 context_used = mono_method_check_context_used (cmethod);
8959 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8960 if (check_linkdemand (cfg, method, cmethod))
8962 CHECK_CFG_EXCEPTION;
8963 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8964 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8971 args [1] = emit_get_rgctx_method (cfg, context_used,
8972 cmethod, MONO_RGCTX_INFO_METHOD);
8973 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8975 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8976 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8980 inline_costs += 10 * num_calls++;
8984 CHECK_STACK_OVF (1);
8986 n = read16 (ip + 2);
8988 EMIT_NEW_ARGLOAD (cfg, ins, n);
8993 CHECK_STACK_OVF (1);
8995 n = read16 (ip + 2);
8997 NEW_ARGLOADA (cfg, ins, n);
8998 MONO_ADD_INS (cfg->cbb, ins);
9006 n = read16 (ip + 2);
9008 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9010 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9014 CHECK_STACK_OVF (1);
9016 n = read16 (ip + 2);
9018 EMIT_NEW_LOCLOAD (cfg, ins, n);
9023 unsigned char *tmp_ip;
9024 CHECK_STACK_OVF (1);
9026 n = read16 (ip + 2);
9029 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9035 EMIT_NEW_LOCLOADA (cfg, ins, n);
9044 n = read16 (ip + 2);
9046 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9048 emit_stloc_ir (cfg, sp, header, n);
9055 if (sp != stack_start)
9057 if (cfg->method != method)
9059 * Inlining this into a loop in a parent could lead to
9060 * stack overflows which is different behavior than the
9061 * non-inlined case, thus disable inlining in this case.
9063 goto inline_failure;
9065 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9066 ins->dreg = alloc_preg (cfg);
9067 ins->sreg1 = sp [0]->dreg;
9068 ins->type = STACK_PTR;
9069 MONO_ADD_INS (cfg->cbb, ins);
9071 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9072 if (header->init_locals)
9073 ins->flags |= MONO_INST_INIT;
9078 case CEE_ENDFILTER: {
9079 MonoExceptionClause *clause, *nearest;
9080 int cc, nearest_num;
9084 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9086 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9087 ins->sreg1 = (*sp)->dreg;
9088 MONO_ADD_INS (bblock, ins);
9089 start_new_bblock = 1;
9094 for (cc = 0; cc < header->num_clauses; ++cc) {
9095 clause = &header->clauses [cc];
9096 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9097 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9098 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9104 if ((ip - header->code) != nearest->handler_offset)
9109 case CEE_UNALIGNED_:
9110 ins_flag |= MONO_INST_UNALIGNED;
9111 /* FIXME: record alignment? we can assume 1 for now */
9116 ins_flag |= MONO_INST_VOLATILE;
9120 ins_flag |= MONO_INST_TAILCALL;
9121 cfg->flags |= MONO_CFG_HAS_TAIL;
9122 /* Can't inline tail calls at this time */
9123 inline_costs += 100000;
9130 token = read32 (ip + 2);
9131 klass = mini_get_class (method, token, generic_context);
9132 CHECK_TYPELOAD (klass);
9133 if (generic_class_is_reference_type (cfg, klass))
9134 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9136 mini_emit_initobj (cfg, *sp, NULL, klass);
9140 case CEE_CONSTRAINED_:
9142 token = read32 (ip + 2);
9143 constrained_call = mono_class_get_full (image, token, generic_context);
9144 CHECK_TYPELOAD (constrained_call);
9149 MonoInst *iargs [3];
9153 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9154 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9155 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9156 /* emit_memset only works when val == 0 */
9157 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9162 if (ip [1] == CEE_CPBLK) {
9163 MonoMethod *memcpy_method = get_memcpy_method ();
9164 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9166 MonoMethod *memset_method = get_memset_method ();
9167 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9177 ins_flag |= MONO_INST_NOTYPECHECK;
9179 ins_flag |= MONO_INST_NORANGECHECK;
9180 /* we ignore the no-nullcheck for now since we
9181 * really do it explicitly only when doing callvirt->call
9187 int handler_offset = -1;
9189 for (i = 0; i < header->num_clauses; ++i) {
9190 MonoExceptionClause *clause = &header->clauses [i];
9191 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9192 handler_offset = clause->handler_offset;
9197 bblock->flags |= BB_EXCEPTION_UNSAFE;
9199 g_assert (handler_offset != -1);
9201 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9202 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9203 ins->sreg1 = load->dreg;
9204 MONO_ADD_INS (bblock, ins);
9206 link_bblock (cfg, bblock, end_bblock);
9207 start_new_bblock = 1;
9215 CHECK_STACK_OVF (1);
9217 token = read32 (ip + 2);
9218 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9219 MonoType *type = mono_type_create_from_typespec (image, token);
9220 token = mono_type_size (type, &ialign);
9222 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9223 CHECK_TYPELOAD (klass);
9224 mono_class_init (klass);
9225 token = mono_class_value_size (klass, &align);
9227 EMIT_NEW_ICONST (cfg, ins, token);
9232 case CEE_REFANYTYPE: {
9233 MonoInst *src_var, *src;
9239 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9241 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9242 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9253 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9258 g_error ("opcode 0x%02x not handled", *ip);
9261 if (start_new_bblock != 1)
9264 bblock->cil_length = ip - bblock->cil_code;
9265 bblock->next_bb = end_bblock;
9267 if (cfg->method == method && cfg->domainvar) {
9269 MonoInst *get_domain;
9271 cfg->cbb = init_localsbb;
9273 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9274 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9277 get_domain->dreg = alloc_preg (cfg);
9278 MONO_ADD_INS (cfg->cbb, get_domain);
9280 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9281 MONO_ADD_INS (cfg->cbb, store);
9284 if (cfg->method == method && cfg->got_var)
9285 mono_emit_load_got_addr (cfg);
9287 if (header->init_locals) {
9290 cfg->cbb = init_localsbb;
9291 cfg->ip = header->code;
9292 for (i = 0; i < header->num_locals; ++i) {
9293 MonoType *ptype = header->locals [i];
9294 int t = ptype->type;
9295 dreg = cfg->locals [i]->dreg;
9297 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9298 t = ptype->data.klass->enum_basetype->type;
9300 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9301 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9302 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9303 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9304 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9305 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9306 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9307 ins->type = STACK_R8;
9308 ins->inst_p0 = (void*)&r8_0;
9309 ins->dreg = alloc_dreg (cfg, STACK_R8);
9310 MONO_ADD_INS (init_localsbb, ins);
9311 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9312 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9313 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9314 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9316 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9323 if (cfg->method == method) {
9325 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9326 bb->region = mono_find_block_region (cfg, bb->real_offset);
9328 mono_create_spvar_for_region (cfg, bb->region);
9329 if (cfg->verbose_level > 2)
9330 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9334 g_slist_free (class_inits);
9335 dont_inline = g_list_remove (dont_inline, method);
9337 if (inline_costs < 0) {
9340 /* Method is too large */
9341 mname = mono_method_full_name (method, TRUE);
9342 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9343 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9348 if ((cfg->verbose_level > 2) && (cfg->method == method))
9349 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9351 return inline_costs;
9354 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9355 g_slist_free (class_inits);
9356 dont_inline = g_list_remove (dont_inline, method);
9360 g_slist_free (class_inits);
9361 dont_inline = g_list_remove (dont_inline, method);
9365 g_slist_free (class_inits);
9366 dont_inline = g_list_remove (dont_inline, method);
9367 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9371 g_slist_free (class_inits);
9372 dont_inline = g_list_remove (dont_inline, method);
9373 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 * Map a STORE*_MEMBASE_REG opcode (store a register into [basereg+offset])
 * to the corresponding *_MEMBASE_IMM opcode, which stores an immediate
 * instead. Used when the source register is known to hold a constant so
 * the store can be folded. Any opcode without an immediate counterpart
 * trips g_assert_not_reached ().
 */
9378 store_membase_reg_to_store_membase_imm (int opcode)
9381 case OP_STORE_MEMBASE_REG:
9382 return OP_STORE_MEMBASE_IMM;
9383 case OP_STOREI1_MEMBASE_REG:
9384 return OP_STOREI1_MEMBASE_IMM;
9385 case OP_STOREI2_MEMBASE_REG:
9386 return OP_STOREI2_MEMBASE_IMM;
9387 case OP_STOREI4_MEMBASE_REG:
9388 return OP_STOREI4_MEMBASE_IMM;
9389 case OP_STOREI8_MEMBASE_REG:
9390 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form for the remaining store opcodes. */
9392 g_assert_not_reached ();
9398 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 * Return the reg-immediate variant of the reg-reg opcode OPCODE
 * (e.g. OP_STOREI4_MEMBASE_REG -> OP_STOREI4_MEMBASE_IMM), used when
 * one source operand turns out to be a constant. Covers integer
 * arithmetic/shift ops, compares, membase stores, and - under the
 * matching #ifdefs - x86/amd64-specific opcodes.
 * NOTE(review): the default path is elided here; presumably it returns
 * a sentinel (-1) for opcodes with no immediate form - confirm against
 * the full source.
 */
9401 mono_op_to_op_imm (int opcode)
9411 return OP_IDIV_UN_IMM;
9415 return OP_IREM_UN_IMM;
9429 return OP_ISHR_UN_IMM;
9446 return OP_LSHR_UN_IMM;
9449 return OP_COMPARE_IMM;
9451 return OP_ICOMPARE_IMM;
9453 return OP_LCOMPARE_IMM;
9455 case OP_STORE_MEMBASE_REG:
9456 return OP_STORE_MEMBASE_IMM;
9457 case OP_STOREI1_MEMBASE_REG:
9458 return OP_STOREI1_MEMBASE_IMM;
9459 case OP_STOREI2_MEMBASE_REG:
9460 return OP_STOREI2_MEMBASE_IMM;
9461 case OP_STOREI4_MEMBASE_REG:
9462 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms. */
9464 #if defined(__i386__) || defined (__x86_64__)
9466 return OP_X86_PUSH_IMM;
9467 case OP_X86_COMPARE_MEMBASE_REG:
9468 return OP_X86_COMPARE_MEMBASE_IMM;
9470 #if defined(__x86_64__)
9471 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9472 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9474 case OP_VOIDCALL_REG:
9483 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 * Map a CIL LDIND_* (load indirect) opcode to the matching typed
 * OP_LOAD*_MEMBASE IR opcode. Note that two source opcodes (visible at
 * 9506/9508 - presumably LDIND_I and LDIND_REF) both map to the
 * pointer-sized OP_LOAD_MEMBASE. Unhandled opcodes trip
 * g_assert_not_reached ().
 */
9490 ldind_to_load_membase (int opcode)
9494 return OP_LOADI1_MEMBASE;
9496 return OP_LOADU1_MEMBASE;
9498 return OP_LOADI2_MEMBASE;
9500 return OP_LOADU2_MEMBASE;
9502 return OP_LOADI4_MEMBASE;
9504 return OP_LOADU4_MEMBASE;
9506 return OP_LOAD_MEMBASE;
9508 return OP_LOAD_MEMBASE;
9510 return OP_LOADI8_MEMBASE;
9512 return OP_LOADR4_MEMBASE;
9514 return OP_LOADR8_MEMBASE;
9516 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 * Map a CIL STIND_* (store indirect) opcode to the matching typed
 * OP_STORE*_MEMBASE_REG IR opcode (store a register to memory at
 * [basereg+offset]). Unhandled opcodes trip g_assert_not_reached ().
 */
9523 stind_to_store_membase (int opcode)
9527 return OP_STOREI1_MEMBASE_REG;
9529 return OP_STOREI2_MEMBASE_REG;
9531 return OP_STOREI4_MEMBASE_REG;
9534 return OP_STORE_MEMBASE_REG;
9536 return OP_STOREI8_MEMBASE_REG;
9538 return OP_STORER4_MEMBASE_REG;
9540 return OP_STORER8_MEMBASE_REG;
9542 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 * Map an OP_LOAD*_MEMBASE opcode (load from [basereg+offset]) to the
 * corresponding OP_LOAD*_MEM opcode, which loads from an absolute
 * address. Only enabled on x86/amd64; OP_LOADI8_MEM is additionally
 * gated on a 64-bit pointer size.
 * NOTE(review): the non-x86 path and the default return are elided
 * here - presumably a sentinel is returned when no absolute-address
 * form applies; confirm against the full source.
 */
9549 mono_load_membase_to_load_mem (int opcode)
9551 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9552 #if defined(__i386__) || defined(__x86_64__)
9554 case OP_LOAD_MEMBASE:
9556 case OP_LOADU1_MEMBASE:
9557 return OP_LOADU1_MEM;
9558 case OP_LOADU2_MEMBASE:
9559 return OP_LOADU2_MEM;
9560 case OP_LOADI4_MEMBASE:
9561 return OP_LOADI4_MEM;
9562 case OP_LOADU4_MEMBASE:
9563 return OP_LOADU4_MEM;
9564 #if SIZEOF_VOID_P == 8
9565 case OP_LOADI8_MEMBASE:
9566 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 * When an ALU result is immediately stored back to memory by
 * STORE_OPCODE, return an x86/amd64 read-modify-write opcode
 * (e.g. OP_X86_ADD_MEMBASE_REG) that folds the ALU op and the store
 * into a single instruction operating directly on the membase slot.
 * The guard at the top of each arch section restricts folding to
 * pointer-sized / I4 (and on amd64 also I8) stores.
 */
9575 op_to_op_dest_membase (int store_opcode, int opcode)
9577 #if defined(__i386__)
9578 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Reg-source forms. */
9583 return OP_X86_ADD_MEMBASE_REG;
9585 return OP_X86_SUB_MEMBASE_REG;
9587 return OP_X86_AND_MEMBASE_REG;
9589 return OP_X86_OR_MEMBASE_REG;
9591 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source forms. */
9594 return OP_X86_ADD_MEMBASE_IMM;
9597 return OP_X86_SUB_MEMBASE_IMM;
9600 return OP_X86_AND_MEMBASE_IMM;
9603 return OP_X86_OR_MEMBASE_IMM;
9606 return OP_X86_XOR_MEMBASE_IMM;
9612 #if defined(__x86_64__)
9613 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86 opcodes; 64-bit ops use AMD64 ones below. */
9618 return OP_X86_ADD_MEMBASE_REG;
9620 return OP_X86_SUB_MEMBASE_REG;
9622 return OP_X86_AND_MEMBASE_REG;
9624 return OP_X86_OR_MEMBASE_REG;
9626 return OP_X86_XOR_MEMBASE_REG;
9628 return OP_X86_ADD_MEMBASE_IMM;
9630 return OP_X86_SUB_MEMBASE_IMM;
9632 return OP_X86_AND_MEMBASE_IMM;
9634 return OP_X86_OR_MEMBASE_IMM;
9636 return OP_X86_XOR_MEMBASE_IMM;
9638 return OP_AMD64_ADD_MEMBASE_REG;
9640 return OP_AMD64_SUB_MEMBASE_REG;
9642 return OP_AMD64_AND_MEMBASE_REG;
9644 return OP_AMD64_OR_MEMBASE_REG;
9646 return OP_AMD64_XOR_MEMBASE_REG;
9649 return OP_AMD64_ADD_MEMBASE_IMM;
9652 return OP_AMD64_SUB_MEMBASE_IMM;
9655 return OP_AMD64_AND_MEMBASE_IMM;
9658 return OP_AMD64_OR_MEMBASE_IMM;
9661 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 * Fold a compare-result opcode followed by a 1-byte store into a single
 * x86/amd64 SETcc-to-memory opcode. Only I1 stores qualify, since the
 * SETcc family writes exactly one byte.
 */
9671 op_to_op_store_membase (int store_opcode, int opcode)
9673 #if defined(__i386__) || defined(__x86_64__)
9676 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9677 return OP_X86_SETEQ_MEMBASE;
9679 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9680 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 * If OPCODE's first source operand is produced by a load of kind
 * LOAD_OPCODE, return an x86/amd64 opcode that folds that load into the
 * instruction, reading the operand directly from memory (push, compare).
 * The guards restrict folding to loads whose width matches the
 * operation; the 32-bit-immediate amd64 compare folding is commented
 * out in the original due to the sign-extension FIXMEs noted below.
 */
9688 op_to_op_src1_membase (int load_opcode, int opcode)
9691 /* FIXME: This has sign extension issues */
9693 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9694 return OP_X86_COMPARE_MEMBASE8_IMM;
9697 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9702 return OP_X86_PUSH_MEMBASE;
9703 case OP_COMPARE_IMM:
9704 case OP_ICOMPARE_IMM:
9705 return OP_X86_COMPARE_MEMBASE_IMM;
9708 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variants follow. */
9713 /* FIXME: This has sign extension issues */
9715 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9716 return OP_X86_COMPARE_MEMBASE8_IMM;
9721 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9722 return OP_X86_PUSH_MEMBASE;
9724 /* FIXME: This only works for 32 bit immediates
9725 case OP_COMPARE_IMM:
9726 case OP_LCOMPARE_IMM:
9727 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9728 return OP_AMD64_COMPARE_MEMBASE_IMM;
9730 case OP_ICOMPARE_IMM:
9731 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9732 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9736 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9737 return OP_AMD64_COMPARE_MEMBASE_REG;
9740 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9741 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 * Counterpart of op_to_op_src1_membase for the SECOND source operand:
 * if OPCODE's second source comes from a load of kind LOAD_OPCODE,
 * return an x86/amd64 reg-membase opcode that reads that operand
 * directly from memory (compare, add, sub, and, or, xor). The width
 * guards pick X86_* opcodes for 32-bit operands and AMD64_* opcodes
 * for pointer-sized / 64-bit operands.
 */
9750 op_to_op_src2_membase (int load_opcode, int opcode)
9753 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9759 return OP_X86_COMPARE_REG_MEMBASE;
9761 return OP_X86_ADD_REG_MEMBASE;
9763 return OP_X86_SUB_REG_MEMBASE;
9765 return OP_X86_AND_REG_MEMBASE;
9767 return OP_X86_OR_REG_MEMBASE;
9769 return OP_X86_XOR_REG_MEMBASE;
/* amd64: choose 32-bit (X86_*) or 64-bit (AMD64_*) folded form by load width. */
9776 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9777 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9781 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9782 return OP_AMD64_COMPARE_REG_MEMBASE;
9785 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9786 return OP_X86_ADD_REG_MEMBASE;
9788 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9789 return OP_X86_SUB_REG_MEMBASE;
9791 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9792 return OP_X86_AND_REG_MEMBASE;
9794 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9795 return OP_X86_OR_REG_MEMBASE;
9797 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9798 return OP_X86_XOR_REG_MEMBASE;
9800 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9801 return OP_AMD64_ADD_REG_MEMBASE;
9803 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9804 return OP_AMD64_SUB_REG_MEMBASE;
9806 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9807 return OP_AMD64_AND_REG_MEMBASE;
9809 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9810 return OP_AMD64_OR_REG_MEMBASE;
9812 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9813 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 * Like mono_op_to_op_imm (), to which it delegates, but first filters
 * out opcodes that this architecture emulates in software (long shifts
 * on 32-bit targets, mul/div where MONO_ARCH_EMULATE_* is defined) -
 * those must not be converted to an immediate form, since the emulation
 * path needs the register operand. The filtered cases are elided in
 * this excerpt; presumably they return -1 - confirm against the full
 * source.
 */
9821 mono_op_to_op_imm_noemul (int opcode)
9824 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9829 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9837 return mono_op_to_op_imm (opcode);
9844 * mono_handle_global_vregs:
9846 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9850 mono_handle_global_vregs (MonoCompile *cfg)
9856 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9858 #ifdef MONO_ARCH_SIMD_INTRINSICS
9859 if (cfg->uses_simd_intrinsics)
9860 mono_simd_simplify_indirection (cfg);
9863 /* Find local vregs used in more than one bb */
9864 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9865 MonoInst *ins = bb->code;
9866 int block_num = bb->block_num;
9868 if (cfg->verbose_level > 2)
9869 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9872 for (; ins; ins = ins->next) {
9873 const char *spec = INS_INFO (ins->opcode);
9874 int regtype, regindex;
9877 if (G_UNLIKELY (cfg->verbose_level > 2))
9878 mono_print_ins (ins);
9880 g_assert (ins->opcode >= MONO_CEE_LAST);
9882 for (regindex = 0; regindex < 3; regindex ++) {
9885 if (regindex == 0) {
9886 regtype = spec [MONO_INST_DEST];
9890 } else if (regindex == 1) {
9891 regtype = spec [MONO_INST_SRC1];
9896 regtype = spec [MONO_INST_SRC2];
9902 #if SIZEOF_VOID_P == 4
9903 if (regtype == 'l') {
9905 * Since some instructions reference the original long vreg,
9906 * and some reference the two component vregs, it is quite hard
9907 * to determine when it needs to be global. So be conservative.
9909 if (!get_vreg_to_inst (cfg, vreg)) {
9910 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9912 if (cfg->verbose_level > 2)
9913 printf ("LONG VREG R%d made global.\n", vreg);
9917 * Make the component vregs volatile since the optimizations can
9918 * get confused otherwise.
9920 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9921 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9925 g_assert (vreg != -1);
9927 prev_bb = vreg_to_bb [vreg];
9929 /* 0 is a valid block num */
9930 vreg_to_bb [vreg] = block_num + 1;
9931 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
9932 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9935 if (!get_vreg_to_inst (cfg, vreg)) {
9936 if (G_UNLIKELY (cfg->verbose_level > 2))
9937 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9941 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9944 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9947 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9950 g_assert_not_reached ();
9954 /* Flag as having been used in more than one bb */
9955 vreg_to_bb [vreg] = -1;
9961 /* If a variable is used in only one bblock, convert it into a local vreg */
9962 for (i = 0; i < cfg->num_varinfo; i++) {
9963 MonoInst *var = cfg->varinfo [i];
9964 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9966 switch (var->type) {
9972 #if SIZEOF_VOID_P == 8
9975 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9976 /* Enabling this screws up the fp stack on x86 */
9979 /* Arguments are implicitly global */
9980 /* Putting R4 vars into registers doesn't work currently */
9981 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
9983 * Make sure that the variable's liveness interval doesn't contain a call, since
9984 * that would cause the lvreg to be spilled, making the whole optimization
9987 /* This is too slow for JIT compilation */
9989 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9991 int def_index, call_index, ins_index;
9992 gboolean spilled = FALSE;
9997 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9998 const char *spec = INS_INFO (ins->opcode);
10000 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10001 def_index = ins_index;
10003 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10004 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10005 if (call_index > def_index) {
10011 if (MONO_IS_CALL (ins))
10012 call_index = ins_index;
10022 if (G_UNLIKELY (cfg->verbose_level > 2))
10023 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10024 var->flags |= MONO_INST_IS_DEAD;
10025 cfg->vreg_to_inst [var->dreg] = NULL;
10032 * Compress the varinfo and vars tables so the liveness computation is faster and
10033 * takes up less space.
10036 for (i = 0; i < cfg->num_varinfo; ++i) {
10037 MonoInst *var = cfg->varinfo [i];
10038 if (pos < i && cfg->locals_start == i)
10039 cfg->locals_start = pos;
10040 if (!(var->flags & MONO_INST_IS_DEAD)) {
10042 cfg->varinfo [pos] = cfg->varinfo [i];
10043 cfg->varinfo [pos]->inst_c0 = pos;
10044 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10045 cfg->vars [pos].idx = pos;
10046 #if SIZEOF_VOID_P == 4
10047 if (cfg->varinfo [pos]->type == STACK_I8) {
10048 /* Modify the two component vars too */
10051 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10052 var1->inst_c0 = pos;
10053 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10054 var1->inst_c0 = pos;
10061 cfg->num_varinfo = pos;
10062 if (cfg->locals_start > cfg->num_varinfo)
10063 cfg->locals_start = cfg->num_varinfo;
10067 * mono_spill_global_vars:
10069 * Generate spill code for variables which are not allocated to registers,
10070 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10071 * code is generated which could be optimized by the local optimization passes.
10074 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10076 MonoBasicBlock *bb;
10078 int orig_next_vreg;
10079 guint32 *vreg_to_lvreg;
10081 guint32 i, lvregs_len;
10082 gboolean dest_has_lvreg = FALSE;
10083 guint32 stacktypes [128];
10085 *need_local_opts = FALSE;
10087 memset (spec2, 0, sizeof (spec2));
10089 /* FIXME: Move this function to mini.c */
10090 stacktypes ['i'] = STACK_PTR;
10091 stacktypes ['l'] = STACK_I8;
10092 stacktypes ['f'] = STACK_R8;
10093 #ifdef MONO_ARCH_SIMD_INTRINSICS
10094 stacktypes ['x'] = STACK_VTYPE;
10097 #if SIZEOF_VOID_P == 4
10098 /* Create MonoInsts for longs */
10099 for (i = 0; i < cfg->num_varinfo; i++) {
10100 MonoInst *ins = cfg->varinfo [i];
10102 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10103 switch (ins->type) {
10104 #ifdef MONO_ARCH_SOFT_FLOAT
10110 g_assert (ins->opcode == OP_REGOFFSET);
10112 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10114 tree->opcode = OP_REGOFFSET;
10115 tree->inst_basereg = ins->inst_basereg;
10116 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10118 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10120 tree->opcode = OP_REGOFFSET;
10121 tree->inst_basereg = ins->inst_basereg;
10122 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10132 /* FIXME: widening and truncation */
10135 * As an optimization, when a variable allocated to the stack is first loaded into
10136 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10137 * the variable again.
10139 orig_next_vreg = cfg->next_vreg;
10140 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10141 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10144 /* Add spill loads/stores */
10145 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10148 if (cfg->verbose_level > 2)
10149 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10151 /* Clear vreg_to_lvreg array */
10152 for (i = 0; i < lvregs_len; i++)
10153 vreg_to_lvreg [lvregs [i]] = 0;
10157 MONO_BB_FOR_EACH_INS (bb, ins) {
10158 const char *spec = INS_INFO (ins->opcode);
10159 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10160 gboolean store, no_lvreg;
10162 if (G_UNLIKELY (cfg->verbose_level > 2))
10163 mono_print_ins (ins);
10165 if (ins->opcode == OP_NOP)
10169 * We handle LDADDR here as well, since it can only be decomposed
10170 * when variable addresses are known.
10172 if (ins->opcode == OP_LDADDR) {
10173 MonoInst *var = ins->inst_p0;
10175 if (var->opcode == OP_VTARG_ADDR) {
10176 /* Happens on SPARC/S390 where vtypes are passed by reference */
10177 MonoInst *vtaddr = var->inst_left;
10178 if (vtaddr->opcode == OP_REGVAR) {
10179 ins->opcode = OP_MOVE;
10180 ins->sreg1 = vtaddr->dreg;
10182 else if (var->inst_left->opcode == OP_REGOFFSET) {
10183 ins->opcode = OP_LOAD_MEMBASE;
10184 ins->inst_basereg = vtaddr->inst_basereg;
10185 ins->inst_offset = vtaddr->inst_offset;
10189 g_assert (var->opcode == OP_REGOFFSET);
10191 ins->opcode = OP_ADD_IMM;
10192 ins->sreg1 = var->inst_basereg;
10193 ins->inst_imm = var->inst_offset;
10196 *need_local_opts = TRUE;
10197 spec = INS_INFO (ins->opcode);
10200 if (ins->opcode < MONO_CEE_LAST) {
10201 mono_print_ins (ins);
10202 g_assert_not_reached ();
10206 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10210 if (MONO_IS_STORE_MEMBASE (ins)) {
10211 tmp_reg = ins->dreg;
10212 ins->dreg = ins->sreg2;
10213 ins->sreg2 = tmp_reg;
10216 spec2 [MONO_INST_DEST] = ' ';
10217 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10218 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10220 } else if (MONO_IS_STORE_MEMINDEX (ins))
10221 g_assert_not_reached ();
10226 if (G_UNLIKELY (cfg->verbose_level > 2))
10227 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10232 regtype = spec [MONO_INST_DEST];
10233 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10236 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10237 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10238 MonoInst *store_ins;
10241 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10243 if (var->opcode == OP_REGVAR) {
10244 ins->dreg = var->dreg;
10245 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10247 * Instead of emitting a load+store, use a _membase opcode.
10249 g_assert (var->opcode == OP_REGOFFSET);
10250 if (ins->opcode == OP_MOVE) {
10253 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10254 ins->inst_basereg = var->inst_basereg;
10255 ins->inst_offset = var->inst_offset;
10258 spec = INS_INFO (ins->opcode);
10262 g_assert (var->opcode == OP_REGOFFSET);
10264 prev_dreg = ins->dreg;
10266 /* Invalidate any previous lvreg for this vreg */
10267 vreg_to_lvreg [ins->dreg] = 0;
10271 #ifdef MONO_ARCH_SOFT_FLOAT
10272 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10274 store_opcode = OP_STOREI8_MEMBASE_REG;
10278 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10280 if (regtype == 'l') {
10281 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10282 mono_bblock_insert_after_ins (bb, ins, store_ins);
10283 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10284 mono_bblock_insert_after_ins (bb, ins, store_ins);
10287 g_assert (store_opcode != OP_STOREV_MEMBASE);
10289 /* Try to fuse the store into the instruction itself */
10290 /* FIXME: Add more instructions */
10291 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10292 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10293 ins->inst_imm = ins->inst_c0;
10294 ins->inst_destbasereg = var->inst_basereg;
10295 ins->inst_offset = var->inst_offset;
10296 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10297 ins->opcode = store_opcode;
10298 ins->inst_destbasereg = var->inst_basereg;
10299 ins->inst_offset = var->inst_offset;
10303 tmp_reg = ins->dreg;
10304 ins->dreg = ins->sreg2;
10305 ins->sreg2 = tmp_reg;
10308 spec2 [MONO_INST_DEST] = ' ';
10309 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10310 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10312 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10313 // FIXME: The backends expect the base reg to be in inst_basereg
10314 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10316 ins->inst_basereg = var->inst_basereg;
10317 ins->inst_offset = var->inst_offset;
10318 spec = INS_INFO (ins->opcode);
10320 /* printf ("INS: "); mono_print_ins (ins); */
10321 /* Create a store instruction */
10322 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10324 /* Insert it after the instruction */
10325 mono_bblock_insert_after_ins (bb, ins, store_ins);
10328 * We can't assign ins->dreg to var->dreg here, since the
10329 * sregs could use it. So set a flag, and do it after
10332 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10333 dest_has_lvreg = TRUE;
10342 for (srcindex = 0; srcindex < 2; ++srcindex) {
10343 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10344 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10346 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10347 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10348 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10349 MonoInst *load_ins;
10350 guint32 load_opcode;
10352 if (var->opcode == OP_REGVAR) {
10354 ins->sreg1 = var->dreg;
10356 ins->sreg2 = var->dreg;
10360 g_assert (var->opcode == OP_REGOFFSET);
10362 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10364 g_assert (load_opcode != OP_LOADV_MEMBASE);
10366 if (vreg_to_lvreg [sreg]) {
10367 /* The variable is already loaded to an lvreg */
10368 if (G_UNLIKELY (cfg->verbose_level > 2))
10369 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10371 ins->sreg1 = vreg_to_lvreg [sreg];
10373 ins->sreg2 = vreg_to_lvreg [sreg];
10377 /* Try to fuse the load into the instruction */
10378 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10379 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10380 ins->inst_basereg = var->inst_basereg;
10381 ins->inst_offset = var->inst_offset;
10382 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10383 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10384 ins->sreg2 = var->inst_basereg;
10385 ins->inst_offset = var->inst_offset;
10387 if (MONO_IS_REAL_MOVE (ins)) {
10388 ins->opcode = OP_NOP;
10391 //printf ("%d ", srcindex); mono_print_ins (ins);
10393 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10395 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10396 if (var->dreg == prev_dreg) {
10398 * sreg refers to the value loaded by the load
10399 * emitted below, but we need to use ins->dreg
10400 * since it refers to the store emitted earlier.
10404 vreg_to_lvreg [var->dreg] = sreg;
10405 g_assert (lvregs_len < 1024);
10406 lvregs [lvregs_len ++] = var->dreg;
10415 if (regtype == 'l') {
10416 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10417 mono_bblock_insert_before_ins (bb, ins, load_ins);
10418 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10419 mono_bblock_insert_before_ins (bb, ins, load_ins);
10422 #if SIZEOF_VOID_P == 4
10423 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10425 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10426 mono_bblock_insert_before_ins (bb, ins, load_ins);
10432 if (dest_has_lvreg) {
10433 vreg_to_lvreg [prev_dreg] = ins->dreg;
10434 g_assert (lvregs_len < 1024);
10435 lvregs [lvregs_len ++] = prev_dreg;
10436 dest_has_lvreg = FALSE;
10440 tmp_reg = ins->dreg;
10441 ins->dreg = ins->sreg2;
10442 ins->sreg2 = tmp_reg;
10445 if (MONO_IS_CALL (ins)) {
10446 /* Clear vreg_to_lvreg array */
10447 for (i = 0; i < lvregs_len; i++)
10448 vreg_to_lvreg [lvregs [i]] = 0;
10452 if (cfg->verbose_level > 2)
10453 mono_print_ins_index (1, ins);
10460 * - use 'iadd' instead of 'int_add'
10461 * - handling ovf opcodes: decompose in method_to_ir.
10462 * - unify iregs/fregs
10463 * -> partly done, the missing parts are:
10464 * - a more complete unification would involve unifying the hregs as well, so
10465 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10466 * would no longer map to the machine hregs, so the code generators would need to
10467 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10468 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10469 * fp/non-fp branches speeds it up by about 15%.
10470 * - use sext/zext opcodes instead of shifts
10472 * - get rid of TEMPLOADs if possible and use vregs instead
10473 * - clean up usage of OP_P/OP_ opcodes
10474 * - cleanup usage of DUMMY_USE
10475 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10477 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10478 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10479 * - make sure handle_stack_args () is called before the branch is emitted
10480 * - when the new IR is done, get rid of all unused stuff
10481 * - COMPARE/BEQ as separate instructions or unify them ?
10482 * - keeping them separate allows specialized compare instructions like
10483 * compare_imm, compare_membase
10484 * - most back ends unify fp compare+branch, fp compare+ceq
10485 * - integrate mono_save_args into inline_method
10486 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10487 * - handle long shift opts on 32 bit platforms somehow: they require
10488 * 3 sregs (2 for arg1 and 1 for arg2)
10489 * - make byref a 'normal' type.
10490 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10491 * variable if needed.
10492 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10493 * like inline_method.
10494 * - remove inlining restrictions
10495 * - fix LNEG and enable cfold of INEG
10496 * - generalize x86 optimizations like ldelema as a peephole optimization
10497 * - add store_mem_imm for amd64
10498 * - optimize the loading of the interruption flag in the managed->native wrappers
10499 * - avoid special handling of OP_NOP in passes
10500 * - move code inserting instructions into one function/macro.
10501 * - try a coalescing phase after liveness analysis
10502 * - add float -> vreg conversion + local optimizations on !x86
10503 * - figure out how to handle decomposed branches during optimizations, ie.
10504 * compare+branch, op_jump_table+op_br etc.
10505 * - promote RuntimeXHandles to vregs
10506 * - vtype cleanups:
10507 * - add a NEW_VARLOADA_VREG macro
10508 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10509 * accessing vtype fields.
10510 * - get rid of I8CONST on 64 bit platforms
10511 * - dealing with the increase in code size due to branches created during opcode
10513 * - use extended basic blocks
10514 * - all parts of the JIT
10515 * - handle_global_vregs () && local regalloc
10516 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10517 * - sources of increase in code size:
10520 * - isinst and castclass
10521 * - lvregs not allocated to global registers even if used multiple times
10522 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10524 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10525 * - add all micro optimizations from the old JIT
10526 * - put tree optimizations into the deadce pass
10527 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10528 * specific function.
10529 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10530 * fcompare + branchCC.
10531 * - create a helper function for allocating a stack slot, taking into account
10532 * MONO_CFG_HAS_SPILLUP.
10533 * - merge new GC changes in mini.c.
10535 * - merge the ia64 switch changes.
10536 * - merge the mips conditional changes.
10537 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10538 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10539 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10540 * - optimize mono_regstate2_alloc_int/float.
10541 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10542 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10543 * parts of the tree could be separated by other instructions, killing the tree
10544 * arguments, or stores killing loads etc. Also, should we fold loads into other
10545 * instructions if the result of the load is used multiple times ?
10546 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10547 * - LAST MERGE: 108395.
10548 * - when returning vtypes in registers, generate IR and append it to the end of the
10549 * last bb instead of doing it in the epilog.
10550 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10551 * ones in inssel.h.
10552 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10560 - When to decompose opcodes:
10561 - earlier: this makes some optimizations hard to implement, since the low level IR
10562 no longer contains the necessary information. But it is easier to do.
10563 - later: harder to implement, enables more optimizations.
10564 - Branches inside bblocks:
10565 - created when decomposing complex opcodes.
10566 - branches to another bblock: harmless, but not tracked by the branch
10567 optimizations, so need to branch to a label at the start of the bblock.
10568 - branches to inside the same bblock: very problematic, trips up the local
10569 reg allocator. Can be fixed by splitting the current bblock, but that is a
10570 complex operation, since some local vregs can become global vregs etc.
10571 - Local/global vregs:
10572 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10573 local register allocator.
10574 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10575 structure, created by mono_create_var (). Assigned to hregs or the stack by
10576 the global register allocator.
10577 - When to do optimizations like alu->alu_imm:
10578 - earlier -> saves work later on since the IR will be smaller/simpler
10579 - later -> can work on more instructions
10580 - Handling of valuetypes:
10581 - When a vtype is pushed on the stack, a new temporary is created, an
10582 instruction computing its address (LDADDR) is emitted and pushed on
10583 the stack. Need to optimize cases when the vtype is used immediately as in
10584 argument passing, stloc etc.
10585 - Instead of the to_end stuff in the old JIT, simply call the function handling
10586 the values on the stack before emitting the last instruction of the bb.
10589 #endif /* DISABLE_JIT */