2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
/* NOTE(review): this is a partial numbered listing; some macro continuation
 * lines are not visible here — confirm against the full source. */
/* Tuning constants — presumably consumed by the inlining heuristics below;
 * their use sites are not visible in this excerpt. */
59 #define BRANCH_COST 100
60 #define INLINE_LENGTH_LIMIT 20
/* Record that inlining failed for the current (non-wrapper) method.
 * Body truncated in this listing. */
61 #define INLINE_FAILURE do {\
62 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Abort the current conversion if a compile-time exception has been set
 * on the MonoCompile. Body truncated in this listing. */
65 #define CHECK_CFG_EXCEPTION do {\
66 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Flag a MethodAccessException on cfg (type + human-readable message built
 * from the two full method names), free the temporary strings, and jump to
 * the shared exception_exit label of the enclosing function. */
69 #define METHOD_ACCESS_FAILURE do { \
70 char *method_fname = mono_method_full_name (method, TRUE); \
71 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
72 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
73 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
74 g_free (method_fname); \
75 g_free (cil_method_fname); \
76 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE, but for an inaccessible field. */
78 #define FIELD_ACCESS_FAILURE do { \
79 char *method_fname = mono_method_full_name (method, TRUE); \
80 char *field_fname = mono_field_full_name (field); \
81 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
82 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
83 g_free (method_fname); \
84 g_free (field_fname); \
85 goto exception_exit; \
/* When compiling with generic sharing, mark the compile as failed for the
 * given opcode (optionally tracing at verbose_level > 2) and bail out via
 * exception_exit. A no-op when cfg->generic_sharing_context is NULL. */
87 #define GENERIC_SHARING_FAILURE(opcode) do { \
88 if (cfg->generic_sharing_context) { \
89 if (cfg->verbose_level > 2) \
90 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
91 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
92 goto exception_exit; \
96 /* Determine whenever 'ins' represents a load of the 'this' argument */
97 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this
 * file (bodies not visible in this excerpt). */
99 static int ldind_to_load_membase (int opcode);
100 static int stind_to_store_membase (int opcode);
/* Translate a three-address opcode to its immediate-operand form; the
 * _noemul variant presumably skips opcodes that require emulation —
 * TODO confirm against the definitions. */
102 int mono_op_to_op_imm (int opcode);
103 int mono_op_to_op_imm_noemul (int opcode);
105 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
106 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
107 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* Main CIL -> IR conversion entry point (the "2" suffix marks the linear-IR
 * engine); supports being invoked for inlined bodies via inline_args/offset. */
109 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
110 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
111 guint inline_offset, gboolean is_virtual_call);
113 /* helper methods signature */
/* Signatures of runtime trampolines, defined in another translation unit. */
114 extern MonoMethodSignature *helper_sig_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_domain_get;
116 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
117 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 * Instruction metadata
/* Expand mini-ops.h into per-opcode dest/src1/src2 register-class metadata. */
125 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
131 #if SIZEOF_VOID_P == 8
136 /* keep in sync with the enum in mini.h */
139 #include "mini-ops.h"
143 extern GHashTable *jit_icall_name_hash;
/* Initialize a MonoMethodVar's liveness info; 0xffff marks "no use yet".
 * Remaining initializers truncated in this listing. */
145 #define MONO_INIT_VARINFO(vi,id) do { \
146 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the (static/inline) virtual-register allocators,
 * for use by other files. Return types not visible in this listing. */
152 mono_alloc_ireg (MonoCompile *cfg)
154 return alloc_ireg (cfg);
158 mono_alloc_freg (MonoCompile *cfg)
160 return alloc_freg (cfg);
164 mono_alloc_preg (MonoCompile *cfg)
166 return alloc_preg (cfg);
170 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
172 return alloc_dreg (cfg, stack_type);
/* Choose the move opcode appropriate for a value of the given MonoType
 * (integer vs. FP vs. vtype moves — return statements are not visible in
 * this listing). Enums and generic instances are unwrapped and re-dispatched. */
176 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
182 switch (type->type) {
185 case MONO_TYPE_BOOLEAN:
197 case MONO_TYPE_FNPTR:
199 case MONO_TYPE_CLASS:
200 case MONO_TYPE_STRING:
201 case MONO_TYPE_OBJECT:
202 case MONO_TYPE_SZARRAY:
203 case MONO_TYPE_ARRAY:
207 #if SIZEOF_VOID_P == 8
/* Enums move like their underlying integral type. */
216 case MONO_TYPE_VALUETYPE:
217 if (type->data.klass->enumtype) {
218 type = type->data.klass->enum_basetype;
221 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
224 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are handled via their container class's byval type. */
226 case MONO_TYPE_GENERICINST:
227 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
231 g_assert (cfg->generic_sharing_context);
234 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of one basic block: its incoming and outgoing edges
 * (block_num with depth-first number in parentheses), then every
 * instruction in order. */
240 mono_print_bb (MonoBasicBlock *bb, const char *msg)
245 printf ("\n%s %d: [IN: ", msg, bb->block_num);
246 for (i = 0; i < bb->in_count; ++i)
247 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
249 for (i = 0; i < bb->out_count; ++i)
250 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
252 for (tree = bb->code; tree; tree = tree->next)
253 mono_print_ins_index (-1, tree);
257 * Can't put this at the beginning, since other files reference stuff from this
/* Report unverifiable IL: trap under the debugger when the break_on_unverified
 * debug option is set, otherwise jump to the enclosing 'unverified' label. */
262 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block starting at CIL
 * offset 'ip'. Out-of-range ips are unverifiable IL. */
264 #define GET_BBLOCK(cfg,tblock,ip) do { \
265 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
267 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
268 NEW_BBLOCK (cfg, (tblock)); \
269 (tblock)->cil_code = (ip); \
270 ADD_BBLOCK (cfg, (tblock)); \
/* Generic array bounds check, used when the architecture provides no
 * specialized sequence: load the length field, compare against the index,
 * and throw IndexOutOfRangeException on index >= length (unsigned compare
 * also catches negative indices). */
274 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
275 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
276 int _length_reg = alloc_ireg (cfg); \
277 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
278 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
279 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
/* Emit a bounds check: an explicit compare sequence normally, or a single
 * OP_BOUNDS_CHECK pseudo-op when array-bounds-check removal (ABCREM) is on,
 * so the optimizer can later eliminate redundant checks. */
283 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
284 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
285 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
288 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
289 ins->sreg1 = array_reg; \
290 ins->sreg2 = index_reg; \
291 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
292 MONO_ADD_INS ((cfg)->cbb, ins); \
293 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
294 (cfg)->cbb->has_array_access = TRUE; \
/* x86/amd64 only: emit an LEA (sr1 + sr2 << shift + imm) into a fresh preg. */
298 #if defined(__i386__) || defined(__x86_64__)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen a 32-bit second operand (sign-extend) before a
 * pointer-sized binary op; no-op on 32-bit targets. */
310 #if SIZEOF_VOID_P == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, type-check them via type_from_op, widen if needed,
 * allocate the destination register, append, and decompose to machine ops. */
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 mono_decompose_opcode ((cfg), (ins)); \
/* Unary analogue of ADD_BINOP. */
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 mono_decompose_opcode (cfg, ins); \
/* Emit a two-way conditional branch: a compare plus a branch whose true edge
 * goes to the CIL target and whose false edge goes to next_block (if known)
 * or to the bblock at the fall-through ip. Spills the eval stack at the block
 * boundary via handle_stack_args. */
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
/* Idempotent: if the edge already exists in out_bb/in_bb, nothing is added.
 * Edge arrays are grown by copy into a fresh mempool allocation (mempool
 * memory is never freed individually, so the old array is simply abandoned). */
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit blocks lack cil_code). */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* Skip if 'to' is already a successor of 'from'. */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* Mirror the edge on the 'to' side (predecessor list). */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause_flags.
 * Handlers/filters take precedence over the try block that contains them,
 * hence the two separate passes over the clauses. */
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
472 /* first search for handlers and filters */
473 for (i = 0; i < header->num_clauses; ++i) {
474 clause = &header->clauses [i];
/* A filter's code runs from filter_offset up to its handler_offset. */
475 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
476 (offset < (clause->handler_offset)))
477 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
479 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
480 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
481 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
482 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
483 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
485 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
489 /* search the try blocks */
490 for (i = 0; i < header->num_clauses; ++i) {
491 clause = &header->clauses [i];
492 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
493 return ((i + 1) << 8) | clause->flags;
/* Collect the handler blocks of the given clause type (e.g. finally) that a
 * branch from 'ip' to 'target' leaves — i.e. clauses containing ip but not
 * target — returned as a GList of MonoBasicBlock*. Used to know which
 * finally/fault handlers must be called on the way out. */
500 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
502 MonoMethod *method = cfg->method;
503 MonoMethodHeader *header = mono_method_get_header (method);
504 MonoExceptionClause *clause;
505 MonoBasicBlock *handler;
509 for (i = 0; i < header->num_clauses; ++i) {
510 clause = &header->clauses [i];
511 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
512 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
513 if (clause->flags == type) {
514 handler = cfg->cil_offset_to_bb [clause->handler_offset];
516 res = g_list_append (res, handler);
/* Get-or-create the stack-pointer spill variable for an EH region, cached in
 * cfg->spvars keyed by region token. MONO_INST_INDIRECT keeps it out of the
 * register allocator, since the EH machinery addresses it in memory. */
524 mono_create_spvar_for_region (MonoCompile *cfg, int region)
528 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
532 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
533 /* prevent it from being register allocated */
534 var->flags |= MONO_INST_INDIRECT;
536 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Lookup-only variant: the exception object variable for a handler offset,
 * or NULL if none was created. */
540 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
542 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception object variable (typed object) for a handler
 * offset; same caching and no-regalloc treatment as the spvar above. */
546 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
550 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
554 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
555 /* prevent it from being register allocated */
556 var->flags |= MONO_INST_INDIRECT;
558 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
564 * Returns the type used in the eval stack when @type is loaded.
565 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE) and inst->klass from the
 * MonoType being pushed. Enums and generic instances are unwrapped and
 * re-dispatched; unknown type codes are a fatal error. */
568 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
572 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
574 inst->type = STACK_MP;
579 switch (type->type) {
581 inst->type = STACK_INV;
585 case MONO_TYPE_BOOLEAN:
591 inst->type = STACK_I4;
596 case MONO_TYPE_FNPTR:
597 inst->type = STACK_PTR;
599 case MONO_TYPE_CLASS:
600 case MONO_TYPE_STRING:
601 case MONO_TYPE_OBJECT:
602 case MONO_TYPE_SZARRAY:
603 case MONO_TYPE_ARRAY:
604 inst->type = STACK_OBJ;
608 inst->type = STACK_I8;
612 inst->type = STACK_R8;
/* Enums load as their underlying integral type. */
614 case MONO_TYPE_VALUETYPE:
615 if (type->data.klass->enumtype) {
616 type = type->data.klass->enum_basetype;
620 inst->type = STACK_VTYPE;
623 case MONO_TYPE_TYPEDBYREF:
624 inst->klass = mono_defaults.typed_reference_class;
625 inst->type = STACK_VTYPE;
627 case MONO_TYPE_GENERICINST:
628 type = &type->data.generic_class->container_class->byval_arg;
631 case MONO_TYPE_MVAR :
632 /* FIXME: all the arguments must be references for now,
633 * later look inside cfg and see if the arg num is
/* Type variables only occur here under generic sharing; treated as object. */
636 g_assert (cfg->generic_sharing_context);
637 inst->type = STACK_OBJ;
640 g_error ("unknown type 0x%02x in eval stack type", type->type);
645 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type for numeric binops, indexed [lhs][rhs]; STACK_INV marks
 * operand combinations that are invalid IL. */
648 bin_num_table [STACK_MAX] [STACK_MAX] = {
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
652 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
653 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
654 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
655 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of negation, indexed by the operand's stack type. */
661 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
664 /* reduce the size of this table */
/* Result type for integer-only binops (and/or/xor etc.); floats invalid. */
666 bin_int_table [STACK_MAX] [STACK_MAX] = {
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
673 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
674 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, nonzero = allowed; values > 1 appear to
 * encode special-cased combinations (ptr/mp, ptr/obj) — see the uses in
 * type_from_op for the exact meaning. */
678 bin_comp_table [STACK_MAX] [STACK_MAX] = {
679 /* Inv i L p F & O vt */
681 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
682 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
683 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
684 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
685 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
686 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
687 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
690 /* reduce the size of this table */
/* Result type of shifts: indexed [value][shift-amount]; amount must be I4/PTR. */
692 shift_table [STACK_MAX] [STACK_MAX] = {
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
704 * Tables to map from the non-specific opcode to the matching
705 * type-specific opcode.
/* Each entry is the delta added to the generic CEE_/OP_ opcode to obtain the
 * type-specialized opcode (I/L/P/F variants), indexed by stack type. */
707 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
709 binops_op_map [STACK_MAX] = {
710 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
713 /* handles from CEE_NEG to CEE_CONV_U8 */
715 unops_op_map [STACK_MAX] = {
716 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
719 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
721 ovfops_op_map [STACK_MAX] = {
722 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
725 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
727 ovf2ops_op_map [STACK_MAX] = {
728 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
731 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
733 ovf3ops_op_map [STACK_MAX] = {
734 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
737 /* handles from CEE_BEQ to CEE_BLT_UN */
739 beqops_op_map [STACK_MAX] = {
740 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
743 /* handles from CEE_CEQ to CEE_CLT_UN */
745 ceqops_op_map [STACK_MAX] = {
746 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
750 * Sets ins->type (the type on the eval stack) according to the
751 * type of the opcode and the arguments to it.
752 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
754 * FIXME: this function sets ins->type unconditionally in some cases, but
755 * it should set it to invalid for some types (a conv.x on an object)
/* Central type-propagation/opcode-specialization step: looks up the result
 * stack type in the tables above and rewrites the generic opcode into its
 * I4/I8/PTR/R8 variant by adding the per-type delta from the *_op_map tables.
 * NOTE(review): several case labels are missing from this listing. */
758 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
760 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, then specialize. */
767 /* FIXME: check unverifiable args for STACK_MP */
768 ins->type = bin_num_table [src1->type] [src2->type];
769 ins->opcode += binops_op_map [ins->type];
776 ins->type = bin_int_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
782 ins->type = shift_table [src1->type] [src2->type];
783 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand; on 64-bit targets
 * pointer-sized operands use the long compare. */
788 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
789 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
790 ins->opcode = OP_LCOMPARE;
791 else if (src1->type == STACK_R8)
792 ins->opcode = OP_FCOMPARE;
794 ins->opcode = OP_ICOMPARE;
796 case OP_ICOMPARE_IMM:
797 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
798 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
799 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / ceq-family: specialize via the branch/set maps. */
811 ins->opcode += beqops_op_map [src1->type];
814 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned variants only accept combinations flagged with bit 0 set. */
821 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
822 ins->opcode += ceqops_op_map [src1->type];
826 ins->type = neg_table [src1->type];
827 ins->opcode += unops_op_map [ins->type];
/* not: only integral operand types are valid. */
830 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
831 ins->type = src1->type;
833 ins->type = STACK_INV;
834 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to i1/u1/i2/u2/i4/u4 always yield I4. */
840 ins->type = STACK_I4;
841 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned-to-float conversion, source-width specific. */
844 ins->type = STACK_R8;
845 switch (src1->type) {
848 ins->opcode = OP_ICONV_TO_R_UN;
851 ins->opcode = OP_LCONV_TO_R_UN;
855 case CEE_CONV_OVF_I1:
856 case CEE_CONV_OVF_U1:
857 case CEE_CONV_OVF_I2:
858 case CEE_CONV_OVF_U2:
859 case CEE_CONV_OVF_I4:
860 case CEE_CONV_OVF_U4:
861 ins->type = STACK_I4;
862 ins->opcode += ovf3ops_op_map [src1->type];
864 case CEE_CONV_OVF_I_UN:
865 case CEE_CONV_OVF_U_UN:
866 ins->type = STACK_PTR;
867 ins->opcode += ovf2ops_op_map [src1->type];
869 case CEE_CONV_OVF_I1_UN:
870 case CEE_CONV_OVF_I2_UN:
871 case CEE_CONV_OVF_I4_UN:
872 case CEE_CONV_OVF_U1_UN:
873 case CEE_CONV_OVF_U2_UN:
874 case CEE_CONV_OVF_U4_UN:
875 ins->type = STACK_I4;
876 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result; opcode depends on source width and target
 * word size (SIZEOF_VOID_P). */
879 ins->type = STACK_PTR;
880 switch (src1->type) {
882 ins->opcode = OP_MOVE;
886 #if SIZEOF_VOID_P == 8
887 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_MOVE;
893 ins->opcode = OP_LCONV_TO_U;
896 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
902 ins->type = STACK_I8;
903 ins->opcode += unops_op_map [src1->type];
905 case CEE_CONV_OVF_I8:
906 case CEE_CONV_OVF_U8:
907 ins->type = STACK_I8;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_U8_UN:
911 case CEE_CONV_OVF_I8_UN:
912 ins->type = STACK_I8;
913 ins->opcode += ovf2ops_op_map [src1->type];
917 ins->type = STACK_R8;
918 ins->opcode += unops_op_map [src1->type];
921 ins->type = STACK_R8;
925 ins->type = STACK_I4;
926 ins->opcode += ovfops_op_map [src1->type];
931 ins->type = STACK_PTR;
932 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: valid per bin_num_table, but never on floats. */
940 ins->type = bin_num_table [src1->type] [src2->type];
941 ins->opcode += ovfops_op_map [src1->type];
942 if (ins->type == STACK_R8)
943 ins->type = STACK_INV;
/* Memory loads already carry their width in the opcode; just set the
 * resulting stack type. */
945 case OP_LOAD_MEMBASE:
946 ins->type = STACK_PTR;
948 case OP_LOADI1_MEMBASE:
949 case OP_LOADU1_MEMBASE:
950 case OP_LOADI2_MEMBASE:
951 case OP_LOADU2_MEMBASE:
952 case OP_LOADI4_MEMBASE:
953 case OP_LOADU4_MEMBASE:
954 ins->type = STACK_PTR;
956 case OP_LOADI8_MEMBASE:
957 ins->type = STACK_I8;
959 case OP_LOADR4_MEMBASE:
960 case OP_LOADR8_MEMBASE:
961 ins->type = STACK_R8;
964 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers with no better class info default to object. */
968 if (ins->type == STACK_MP)
969 ins->klass = mono_defaults.object_class;
/* Stack type for each MONO_TYPE_* code — presumably indexed by type->type;
 * TODO confirm the indexing against the full source. */
974 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
980 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the values on the eval stack are compatible with the callee
 * signature (byref-ness, reference vs. value, float kinds). Return paths are
 * not visible in this listing. */
985 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
989 switch (args->type) {
999 for (i = 0; i < sig->param_count; ++i) {
1000 switch (args [i].type) {
1004 if (!sig->params [i]->byref)
1008 if (sig->params [i]->byref)
1010 switch (sig->params [i]->type) {
1011 case MONO_TYPE_CLASS:
1012 case MONO_TYPE_STRING:
1013 case MONO_TYPE_OBJECT:
1014 case MONO_TYPE_SZARRAY:
1015 case MONO_TYPE_ARRAY:
/* R8 stack entries must match an R4/R8, non-byref parameter. */
1022 if (sig->params [i]->byref)
1024 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1033 /*if (!param_table [args [i].type] [sig->params [i]->type])
1041 * When we need a pointer to the current domain many times in a method, we
1042 * call mono_domain_get() once and we store the result in a local variable.
1043 * This function returns the variable that represents the MonoDomain*.
1045 inline static MonoInst *
1046 mono_get_domainvar (MonoCompile *cfg)
1048 if (!cfg->domainvar)
1049 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 return cfg->domainvar;
1054 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful when the architecture
 * needs an explicit GOT var and we are compiling AOT (non-AOT path truncated
 * in this listing). */
1057 inline static MonoInst *
1058 mono_get_got_var (MonoCompile *cfg)
1060 #ifdef MONO_ARCH_NEED_GOT_VAR
1061 if (!cfg->compile_aot)
1063 if (!cfg->got_var) {
1064 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1066 return cfg->got_var;
/* Lazily create the runtime-generic-context (rgctx) variable; only valid
 * under generic sharing. Stack-allocated so its address can be taken. */
1073 mono_get_vtable_var (MonoCompile *cfg)
1075 g_assert (cfg->generic_sharing_context);
1077 if (!cfg->rgctx_var) {
1078 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1079 /* force the var to be stack allocated */
1080 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1083 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a canonical MonoType*,
 * using the instruction's klass for MP/OBJ/VTYPE entries. */
1087 type_from_stack_type (MonoInst *ins) {
1088 switch (ins->type) {
1089 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1090 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1091 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1092 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1094 return &ins->klass->this_arg;
1095 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1096 case STACK_VTYPE: return &ins->klass->byval_arg;
1098 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: MonoType -> STACK_* code, after unwrapping enums via
 * mono_type_get_underlying_type. Return values truncated in this listing. */
1103 static G_GNUC_UNUSED int
1104 type_to_stack_type (MonoType *t)
1106 switch (mono_type_get_underlying_type (t)->type) {
1109 case MONO_TYPE_BOOLEAN:
1112 case MONO_TYPE_CHAR:
1119 case MONO_TYPE_FNPTR:
1121 case MONO_TYPE_CLASS:
1122 case MONO_TYPE_STRING:
1123 case MONO_TYPE_OBJECT:
1124 case MONO_TYPE_SZARRAY:
1125 case MONO_TYPE_ARRAY:
1133 case MONO_TYPE_VALUETYPE:
1134 case MONO_TYPE_TYPEDBYREF:
1136 case MONO_TYPE_GENERICINST:
1137 if (mono_type_generic_inst_is_valuetype (t))
1143 g_assert_not_reached ();
/* Element class implied by a CIL ldelem.*/stelem.* opcode (case labels for
 * most opcodes are missing from this listing). */
1150 array_access_to_klass (int opcode)
1154 return mono_defaults.byte_class;
1156 return mono_defaults.uint16_class;
1159 return mono_defaults.int_class;
1162 return mono_defaults.sbyte_class;
1165 return mono_defaults.int16_class;
1168 return mono_defaults.int32_class;
1170 return mono_defaults.uint32_class;
1173 return mono_defaults.int64_class;
1176 return mono_defaults.single_class;
1179 return mono_defaults.double_class;
1180 case CEE_LDELEM_REF:
1181 case CEE_STELEM_REF:
1182 return mono_defaults.object_class;
1184 g_assert_not_reached ();
1190 * We try to share variables when possible
/* Return a local variable to spill stack slot 'slot' of type ins->type at a
 * basic-block boundary. Variables are cached per (slot, stack-type) in
 * cfg->intvars so joins reuse the same local; slots beyond max_stack (from
 * inlining) always get a fresh variable. */
1193 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1198 /* inlining can result in deeper stacks */
1199 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1200 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per stack type per slot (STACK_INV excluded). */
1202 pos = ins->type - 1 + slot * STACK_MAX;
1204 switch (ins->type) {
1211 if ((vnum = cfg->intvars [pos]))
1212 return cfg->varinfo [vnum];
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 cfg->intvars [pos] = res->inst_c0;
1217 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Under AOT, remember which (image, token) produced 'key' so the AOT
 * compiler can emit the proper relocation later. No-op otherwise. */
1223 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1225 if (cfg->compile_aot) {
1226 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1227 jump_info_token->image = image;
1228 jump_info_token->token = token;
1229 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1234 * This function is called to handle items that are left on the evaluation stack
1235 * at basic block boundaries. What happens is that we save the values to local variables
1236 * and we reload them later when first entering the target basic block (with the
1237 * handle_loaded_temps () function).
1238 * A single joint point will use the same variables (stored in the array bb->out_stack or
1239 * bb->in_stack, if the basic block is before or after the joint point).
1241 * This function needs to be called _before_ emitting the last instruction of
1242 * the bb (i.e. before emitting a branch).
1243 * If the stack merge fails at a join point, cfg->unverifiable is set.
1246 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1249 MonoBasicBlock *bb = cfg->cbb;
1250 MonoBasicBlock *outb;
1251 MonoInst *inst, **locals;
1256 if (cfg->verbose_level > 3)
1257 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick the spill variables, preferring the
 * in_stack already chosen by a successor so both sides agree. */
1258 if (!bb->out_scount) {
1259 bb->out_scount = count;
1260 //printf ("bblock %d has out:", bb->block_num);
1262 for (i = 0; i < bb->out_count; ++i) {
1263 outb = bb->out_bb [i];
1264 /* exception handlers are linked, but they should not be considered for stack args */
1265 if (outb->flags & BB_EXCEPTION_HANDLER)
1267 //printf (" %d", outb->block_num);
1268 if (outb->in_stack) {
1270 bb->out_stack = outb->in_stack;
1276 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1277 for (i = 0; i < count; ++i) {
1279 * try to reuse temps already allocated for this purpouse, if they occupy the same
1280 * stack slot and if they are of the same type.
1281 * This won't cause conflicts since if 'local' is used to
1282 * store one of the values in the in_stack of a bblock, then
1283 * the same variable will be used for the same outgoing stack
1285 * This doesn't work when inlining methods, since the bblocks
1286 * in the inlined methods do not inherit their in_stack from
1287 * the bblock they are inlined to. See bug #58863 for an
1290 if (cfg->inlined_method)
1291 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1293 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor's in_stack; a depth mismatch
 * at a join point makes the method unverifiable. */
1298 for (i = 0; i < bb->out_count; ++i) {
1299 outb = bb->out_bb [i];
1300 /* exception handlers are linked, but they should not be considered for stack args */
1301 if (outb->flags & BB_EXCEPTION_HANDLER)
1303 if (outb->in_scount) {
1304 if (outb->in_scount != bb->out_scount) {
1305 cfg->unverifiable = TRUE;
1308 continue; /* check they are the same locals */
1310 outb->in_scount = count;
1311 outb->in_stack = bb->out_stack;
1314 locals = bb->out_stack;
/* Spill each live stack entry into its temp, and replace the stack entry
 * with the temp so later code sees the variable. */
1316 for (i = 0; i < count; ++i) {
1317 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1318 inst->cil_code = sp [i]->cil_code;
1319 sp [i] = locals [i];
1320 if (cfg->verbose_level > 3)
1321 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1325 * It is possible that the out bblocks already have in_stack assigned, and
1326 * the in_stacks differ. In this case, we will store to all the different
1333 /* Find a bblock which has a different in_stack */
1335 while (bindex < bb->out_count) {
1336 outb = bb->out_bb [bindex];
1337 /* exception handlers are linked, but they should not be considered for stack args */
1338 if (outb->flags & BB_EXCEPTION_HANDLER) {
1342 if (outb->in_stack != locals) {
1343 for (i = 0; i < count; ++i) {
1344 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1345 inst->cil_code = sp [i]->cil_code;
1346 sp [i] = locals [i];
1347 if (cfg->verbose_level > 3)
1348 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1350 locals = outb->in_stack;
1359 /* Emit code which loads interface_offsets [klass->interface_id]
1360 * The array is stored in memory before vtable.
1363 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1365 if (cfg->compile_aot) {
1366 int ioffset_reg = alloc_preg (cfg);
1367 int iid_reg = alloc_preg (cfg);
1369 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1370 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1371 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1374 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1379 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1380 * stored in "klass_reg" implements the interface "klass".
1383 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1385 int ibitmap_reg = alloc_preg (cfg);
1386 int ibitmap_byte_reg = alloc_preg (cfg);
1388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1390 if (cfg->compile_aot) {
1391 int iid_reg = alloc_preg (cfg);
1392 int shifted_iid_reg = alloc_preg (cfg);
1393 int ibitmap_byte_address_reg = alloc_preg (cfg);
1394 int masked_iid_reg = alloc_preg (cfg);
1395 int iid_one_bit_reg = alloc_preg (cfg);
1396 int iid_bit_reg = alloc_preg (cfg);
1397 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1398 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1399 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1402 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1406 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1412 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1413 * stored in "vtable_reg" implements the interface "klass".
1416 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1418 int ibitmap_reg = alloc_preg (cfg);
1419 int ibitmap_byte_reg = alloc_preg (cfg);
1421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1423 if (cfg->compile_aot) {
1424 int iid_reg = alloc_preg (cfg);
1425 int shifted_iid_reg = alloc_preg (cfg);
1426 int ibitmap_byte_address_reg = alloc_preg (cfg);
1427 int masked_iid_reg = alloc_preg (cfg);
1428 int iid_one_bit_reg = alloc_preg (cfg);
1429 int iid_bit_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1432 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1433 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1435 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1436 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1439 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1445 * Emit code which checks whenever the interface id of @klass is smaller than
1446 * than the value given by max_iid_reg.
1449 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1450 MonoBasicBlock *false_target)
1452 if (cfg->compile_aot) {
1453 int iid_reg = alloc_preg (cfg);
1454 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1455 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1458 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1462 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1465 /* Same as above, but obtains max_iid from a vtable */
1467 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1468 MonoBasicBlock *false_target)
1470 int max_iid_reg = alloc_preg (cfg);
1472 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1473 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1476 /* Same as above, but obtains max_iid from a klass */
1478 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1479 MonoBasicBlock *false_target)
1481 int max_iid_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1484 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1488 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int idepth_reg = alloc_preg (cfg);
1491 int stypes_reg = alloc_preg (cfg);
1492 int stype = alloc_preg (cfg);
1494 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1501 if (cfg->compile_aot) {
1502 int const_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1543 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1545 if (cfg->compile_aot) {
1546 int const_reg = alloc_preg (cfg);
1547 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1548 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1556 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1558 if (cfg->compile_aot) {
1559 int const_reg = alloc_preg (cfg);
1560 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1561 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 * Emit a castclass check of the class stored in KLASS_REG against the
 * compile-time class KLASS, throwing InvalidCastException on mismatch.
 * For array classes the rank and the element class are checked (recursing
 * through this function for arrays of arrays); otherwise the supertypes
 * table is consulted via mini_emit_class_check ().
 * NOTE(review): several structural lines (braces/else) appear elided in
 * this extraction; the visible statements are documented as-is.
 */
1569 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1572 int rank_reg = alloc_preg (cfg);
1573 int eclass_reg = alloc_preg (cfg);
/* Array case: the rank must match exactly. */
1575 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1577 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1578 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Then check the element (cast) class, special-casing object/enum targets. */
1579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1580 if (klass->cast_class == mono_defaults.object_class) {
1581 int parent_reg = alloc_preg (cfg);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1583 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1584 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1585 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1586 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class == mono_defaults.enum_class) {
1589 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1590 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1591 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1593 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1594 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector
 * (no bounds array), unless the recursive call disabled the check. */
1597 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1598 /* Check that the object is a vector too */
1599 int bounds_reg = alloc_preg (cfg);
1600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1602 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table, as in mini_emit_isninst_cast (),
 * but throwing instead of branching on failure. */
1605 int idepth_reg = alloc_preg (cfg);
1606 int stypes_reg = alloc_preg (cfg);
1607 int stype = alloc_preg (cfg);
1609 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1610 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1611 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1612 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1616 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 * Emit inline code to set SIZE bytes starting at DESTREG + OFFSET to VAL.
 * Only zeroing is supported (asserted below).  Small aligned sizes use a
 * single store-immediate; otherwise the value is materialized in a vreg
 * and stored out in pointer-size/4/2/1-byte chunks.
 * NOTE(review): the switch/loop skeleton appears elided in this
 * extraction; the visible statements are documented as-is.
 */
1621 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1625 g_assert (val == 0);
/* Small, sufficiently aligned sizes: one store-immediate of the right width. */
1630 if ((size <= 4) && (size <= align)) {
1633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1636 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1639 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1641 #if SIZEOF_VOID_P == 8
1643 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: put VAL into a vreg of pointer width... */
1649 val_reg = alloc_preg (cfg);
1651 if (sizeof (gpointer) == 8)
1652 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1654 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* ...then store it out byte-wise for unaligned prefixes... */
1657 /* This could be optimized further if necessary */
1659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* ...and in the widest chunks the target supports for the rest. */
1666 #if !NO_UNALIGNED_ACCESS
1667 if (sizeof (gpointer) == 8) {
1669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1698 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy2:
 * Emit inline code to copy SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, using load/store pairs of decreasing width
 * (8/4/2/1 bytes depending on pointer size, alignment and target
 * support for unaligned access).
 * NOTE(review): the surrounding loop/conditional skeleton appears
 * elided in this extraction; the visible statements are documented as-is.
 */
1701 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Byte-wise copy for unaligned prefixes. */
1709 /* This could be optimized further if necessary */
1711 cur_reg = alloc_preg (cfg);
1712 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Widest chunks first where the target allows unaligned access. */
1720 #if !NO_UNALIGNED_ACCESS
1721 if (sizeof (gpointer) == 8) {
1723 cur_reg = alloc_preg (cfg);
1724 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1725 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks. */
1734 cur_reg = alloc_preg (cfg);
1735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1736 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte chunks. */
1742 cur_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* Trailing bytes. */
1750 cur_reg = alloc_preg (cfg);
1751 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a method's return TYPE to the matching call IR opcode, choosing the
 * indirect (CALLI), virtual (VIRT) or plain variant per the flags.  Enums
 * and generic instances are unwrapped and re-dispatched.
 * NOTE(review): several fall-through case labels appear elided in this
 * extraction; the visible cases are documented as-is.
 */
1762 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized: plain CALL family. */
1765 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Reduce shared-generic types to their basic representation first. */
1768 type = mini_get_basic_type_from_generic (gsctx, type);
1769 switch (type->type) {
1770 case MONO_TYPE_VOID:
1771 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1774 case MONO_TYPE_BOOLEAN:
1777 case MONO_TYPE_CHAR:
1780 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1784 case MONO_TYPE_FNPTR:
1785 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 case MONO_TYPE_CLASS:
1787 case MONO_TYPE_STRING:
1788 case MONO_TYPE_OBJECT:
1789 case MONO_TYPE_SZARRAY:
1790 case MONO_TYPE_ARRAY:
1791 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integer returns. */
1794 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
/* Floating point returns. */
1797 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1798 case MONO_TYPE_VALUETYPE:
1799 if (type->data.klass->enumtype) {
/* Enums dispatch again on their underlying base type. */
1800 type = type->data.klass->enum_basetype;
1803 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1804 case MONO_TYPE_TYPEDBYREF:
1805 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1806 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1807 type = &type->data.generic_class->container_class->byval_arg;
1810 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * a sanity check.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 * NOTE(review): several case labels and return statements appear elided
 * in this extraction; the visible checks are documented as-is.
 */
1827 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1829 MonoType *simple_type;
/* byref targets accept managed pointers of the matching class, or raw pointers. */
1832 if (target->byref) {
1833 /* FIXME: check that the pointed to types match */
1834 if (arg->type == STACK_MP)
1835 return arg->klass != mono_class_from_mono_type (target);
1836 if (arg->type == STACK_PTR)
/* Unwrap enums etc. before dispatching on the element type. */
1841 simple_type = mono_type_get_underlying_type (target);
1842 switch (simple_type->type) {
1843 case MONO_TYPE_VOID:
1847 case MONO_TYPE_BOOLEAN:
1850 case MONO_TYPE_CHAR:
1853 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1857 /* STACK_MP is needed when setting pinned locals */
1858 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1863 case MONO_TYPE_FNPTR:
1864 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1867 case MONO_TYPE_CLASS:
1868 case MONO_TYPE_STRING:
1869 case MONO_TYPE_OBJECT:
1870 case MONO_TYPE_SZARRAY:
1871 case MONO_TYPE_ARRAY:
1872 if (arg->type != STACK_OBJ)
1874 /* FIXME: check type compatibility */
1878 if (arg->type != STACK_I8)
1883 if (arg->type != STACK_R8)
/* Value types must match the exact class carried on the stack slot. */
1886 case MONO_TYPE_VALUETYPE:
1887 if (arg->type != STACK_VTYPE)
1889 klass = mono_class_from_mono_type (simple_type);
1890 if (klass != arg->klass)
1893 case MONO_TYPE_TYPEDBYREF:
1894 if (arg->type != STACK_VTYPE)
1896 klass = mono_class_from_mono_type (simple_type);
1897 if (klass != arg->klass)
1900 case MONO_TYPE_GENERICINST:
1901 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1902 if (arg->type != STACK_VTYPE)
1904 klass = mono_class_from_mono_type (simple_type);
1905 if (klass != arg->klass)
1909 if (arg->type != STACK_OBJ)
1911 /* FIXME: check type compatibility */
/* Shared generic parameters are treated as references for now. */
1915 case MONO_TYPE_MVAR:
1916 /* FIXME: all the arguments must be references for now,
1917 * later look inside cfg and see if the arg num is
1918 * really a reference
1920 g_assert (cfg->generic_sharing_context);
1921 if (arg->type != STACK_OBJ)
1925 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 * NOTE(review): several case labels and return statements appear elided
 * in this extraction; the visible checks are documented as-is.
 */
1940 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1942 MonoType *simple_type;
/* The implicit 'this' argument must be a reference or pointer. */
1946 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1950 for (i = 0; i < sig->param_count; ++i) {
1951 if (sig->params [i]->byref) {
1952 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Reduce shared-generic parameter types before dispatching. */
1956 simple_type = sig->params [i];
1957 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1959 switch (simple_type->type) {
1960 case MONO_TYPE_VOID:
1965 case MONO_TYPE_BOOLEAN:
1968 case MONO_TYPE_CHAR:
1971 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1977 case MONO_TYPE_FNPTR:
1978 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1981 case MONO_TYPE_CLASS:
1982 case MONO_TYPE_STRING:
1983 case MONO_TYPE_OBJECT:
1984 case MONO_TYPE_SZARRAY:
1985 case MONO_TYPE_ARRAY:
1986 if (args [i]->type != STACK_OBJ)
1991 if (args [i]->type != STACK_I8)
1996 if (args [i]->type != STACK_R8)
1999 case MONO_TYPE_VALUETYPE:
2000 if (simple_type->data.klass->enumtype) {
/* Enums re-dispatch on their underlying base type. */
2001 simple_type = simple_type->data.klass->enum_basetype;
2004 if (args [i]->type != STACK_VTYPE)
2007 case MONO_TYPE_TYPEDBYREF:
2008 if (args [i]->type != STACK_VTYPE)
2011 case MONO_TYPE_GENERICINST:
2012 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2016 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a CALLVIRT-family opcode to its corresponding direct-call opcode.
 * NOTE(review): most of the switch's case labels appear elided in this
 * extraction; the visible lines are documented as-is.
 */
2024 callvirt_to_call (int opcode)
2029 case OP_VOIDCALLVIRT:
/* Unknown opcode: this function must only see CALLVIRT variants. */
2038 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map a CALLVIRT-family opcode to the matching CALL_MEMBASE opcode, i.e.
 * a call whose target address is loaded from a base register + offset.
 * NOTE(review): some case labels appear elided in this extraction.
 */
2045 callvirt_to_call_membase (int opcode)
2049 return OP_CALL_MEMBASE;
2050 case OP_VOIDCALLVIRT:
2051 return OP_VOIDCALL_MEMBASE;
2053 return OP_FCALL_MEMBASE;
2055 return OP_LCALL_MEMBASE;
2057 return OP_VCALL_MEMBASE;
/* Unknown opcode: this function must only see CALLVIRT variants. */
2059 g_assert_not_reached ();
2065 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Arrange for the IMT (interface method table) discriminator to be passed
 * to CALL.  When the architecture defines a dedicated IMT register, load
 * either IMT_ARG's value or the (possibly AOT-patched) method constant into
 * a vreg and bind it to MONO_ARCH_IMT_REG; otherwise defer to the
 * arch-specific helper.
 */
2067 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2069 #ifdef MONO_ARCH_IMT_REG
2070 int method_reg = alloc_preg (cfg);
/* Caller supplied the discriminator explicitly. */
2073 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2074 } else if (cfg->compile_aot) {
/* AOT: the method pointer becomes a patched constant. */
2075 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the method pointer directly. */
2078 MONO_INST_NEW (cfg, ins, OP_PCONST);
2079 ins->inst_p0 = call->method;
2080 ins->dreg = method_reg;
2081 MONO_ADD_INS (cfg->cbb, ins);
2084 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: let the backend emit whatever it needs. */
2086 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch descriptor from MP describing a patch of
 * TYPE at IP whose resolution target is TARGET.  The memory comes from the
 * mempool, so it is freed together with it.
 * NOTE(review): the ip/type field assignments appear elided in this
 * extraction; only the visible lines are documented.
 */
2091 static MonoJumpInfo *
2092 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2094 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2098 ji->data.target = target;
2103 inline static MonoInst*
2104 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects an indirect call, VIRTUAL a virtual one.  Handles vtype
 * returns (via OP_OUTARG_VTRETADDR), soft-float r4 argument conversion,
 * and lets the backend lower the outgoing arguments.  The instruction is
 * NOT added to a bblock; callers do that after filling in the target.
 */
2106 inline static MonoCallInst *
2107 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2108 MonoInst **args, int calli, int virtual)
2111 #ifdef MONO_ARCH_SOFT_FLOAT
/* Pick the call opcode from the return type and call flavor. */
2115 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2118 call->signature = sig;
2120 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: route the result through a temporary local. */
2122 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2123 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2126 temp->backend.is_pinvoke = sig->pinvoke;
/*
 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
 * address of return value to increase optimization opportunities.
 * Before vtype decomposition, the dreg of the call ins itself represents the
 * fact the call modifies the return value. After decomposition, the call will
 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
 * will be transformed into an LDADDR.
 */
2136 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2137 loada->dreg = alloc_preg (cfg);
2138 loada->inst_p0 = temp;
2139 /* We reference the call too since call->dreg could change during optimization */
2140 loada->inst_p1 = call;
2141 MONO_ADD_INS (cfg->cbb, loada);
2143 call->inst.dreg = temp->dreg;
2145 call->vret_var = loada;
2146 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2147 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2149 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * If the call has a float argument, we would need to do an r8->r4 conversion using
 * an icall, but that cannot be done during the call sequence since it would clobber
 * the call registers + the stack. So we do it before emitting the call.
 */
2155 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2157 MonoInst *in = call->args [i];
2159 if (i >= sig->hasthis)
2160 t = sig->params [i - sig->hasthis];
2162 t = &mono_defaults.int_class->byval_arg;
2163 t = mono_type_get_underlying_type (t);
2165 if (!t->byref && t->type == MONO_TYPE_R4) {
2166 MonoInst *iargs [1];
2170 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2172 /* The result will be in an int vreg */
2173 call->args [i] = conv;
/* Let the backend lower arguments into registers/stack slots. */
2178 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area and remember we make calls. */
2180 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2181 cfg->flags |= MONO_CFG_HAS_CALLS;
2186 inline static MonoInst*
2187 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2189 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2191 call->inst.sreg1 = addr->dreg;
2193 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2195 return (MonoInst*)call;
2198 inline static MonoInst*
2199 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2201 #ifdef MONO_ARCH_RGCTX_REG
2206 rgctx_reg = mono_alloc_preg (cfg);
2207 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2209 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2211 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2212 cfg->uses_rgctx_reg = TRUE;
2214 return (MonoInst*)call;
2216 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) call to METHOD with signature SIG and
 * arguments ARGS.  A non-NULL THIS makes the call virtual; IMT_ARG, when
 * given, supplies the interface-dispatch discriminator.  Devirtualizes
 * non-virtual/final/sealed targets, special-cases remoting proxies,
 * string ctors and delegate Invoke, and otherwise dispatches through the
 * vtable (or IMT for interface methods).  Returns the call instruction.
 * NOTE(review): several structural lines appear elided in this
 * extraction; the visible statements are documented as-is.
 */
2222 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2223 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2225 gboolean virtual = this != NULL;
2226 gboolean enable_for_aot = TRUE;
/* String ctors are declared void but actually return the string. */
2229 if (method->string_ctor) {
2230 /* Create the real signature */
2231 /* FIXME: Cache these */
2232 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2233 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2238 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Possibly-remote receivers of non-virtual calls go through a checked
 * remoting wrapper. */
2240 if (this && sig->hasthis &&
2241 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2242 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2243 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2245 call->method = method;
2247 call->inst.flags |= MONO_INST_HAS_METHOD;
2248 call->inst.inst_left = this;
/* Virtual-call path. */
2251 int vtable_reg, slot_reg, this_reg;
2253 this_reg = this->dreg;
/* Devirtualize non-virtual (or final non-remoting-wrapper) targets:
 * only a null check on 'this' is needed. */
2255 if ((!cfg->compile_aot || enable_for_aot) &&
2256 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2257 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2258 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
/*
 * the method is not virtual, we just need to ensure this is not null
 * and then we can call the method directly.
 */
2263 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2264 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2267 if (!method->string_ctor) {
2268 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2269 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2270 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2273 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2275 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2277 return (MonoInst*)call;
2280 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2281 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2282 /* Make a call to delegate->invoke_impl */
2283 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2284 call->inst.inst_basereg = this_reg;
2285 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2286 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2288 return (MonoInst*)call;
/* Final methods / sealed classes also allow static dispatch. */
2292 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2293 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2294 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
/*
 * the method is virtual, but we can statically dispatch since either
 * it's class or the method itself are sealed.
 * But first we need to ensure it's not a null reference.
 */
2300 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2301 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2302 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2304 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2305 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2307 return (MonoInst*)call;
/* True virtual dispatch: load the slot out of the vtable. */
2310 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2312 /* Initialize method->slot */
2313 mono_class_setup_vtable (method->klass);
2315 vtable_reg = alloc_preg (cfg);
2316 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2317 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2319 #ifdef MONO_ARCH_HAVE_IMT
/* Interface dispatch through the IMT: slots sit before the vtable. */
2321 guint32 imt_slot = mono_method_get_imt_slot (method);
2322 emit_imt_argument (cfg, call, imt_arg);
2323 slot_reg = vtable_reg;
2324 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Fallback interface dispatch via the interface-offsets table. */
2327 if (slot_reg == -1) {
2328 slot_reg = alloc_preg (cfg);
2329 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2330 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Plain virtual dispatch: index directly into the vtable. */
2333 slot_reg = vtable_reg;
2334 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2335 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
/* Generic virtual methods still need the IMT discriminator. */
2337 g_assert (mono_method_signature (method)->generic_param_count);
2338 emit_imt_argument (cfg, call, imt_arg);
2342 call->inst.sreg1 = slot_reg;
2343 call->virtual = TRUE;
2346 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2348 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 * Like mono_emit_method_call_full (), but additionally passes the runtime
 * generic context given in VTABLE_ARG through the architecture's rgctx
 * register (when MONO_ARCH_RGCTX_REG is available).
 * NOTE(review): declarations and some #ifdef scaffolding appear elided in
 * this extraction; the visible lines are documented as-is.
 */
2352 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2353 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2360 #ifdef MONO_ARCH_RGCTX_REG
/* Stage the rgctx value before emitting the call itself. */
2361 rgctx_reg = mono_alloc_preg (cfg);
2362 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2367 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2369 call = (MonoCallInst*)ins;
2371 #ifdef MONO_ARCH_RGCTX_REG
/* Bind the staged value to the dedicated rgctx register for the call. */
2372 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2373 cfg->uses_rgctx_reg = TRUE;
2382 static inline MonoInst*
2383 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2385 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the parameter-list continuation, local declarations and
 * the assignment of FUNC into the call appear elided in this extraction;
 * only the visible lines are documented.
 */
2389 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* Build the call (neither indirect nor virtual)... */
2396 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
/* ...and append it to the current bblock. */
2399 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2401 return (MonoInst*)call;
2404 inline static MonoInst*
2405 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2407 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2411 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2415 * mono_emit_abs_call:
2417 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2419 inline static MonoInst*
2420 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2421 MonoMethodSignature *sig, MonoInst **args)
2423 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2427 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2430 if (cfg->abs_patches == NULL)
2431 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2432 g_hash_table_insert (cfg->abs_patches, ji, ji);
2433 ins = mono_emit_native_call (cfg, ji, sig, args);
2434 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2439 get_memcpy_method (void)
2441 static MonoMethod *memcpy_method = NULL;
2442 if (!memcpy_method) {
2443 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2445 g_error ("Old corlib found. Install a new one");
2447 return memcpy_method;
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 * Small copies are inlined via mini_emit_memcpy2 (); larger ones call the
 * managed memcpy helper.  NATIVE selects the marshalled (native) size.
 * NOTE(review): local declarations and the iargs[0]/iargs[1] setup appear
 * elided in this extraction; only the visible lines are documented.
 */
2455 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2457 MonoInst *iargs [3];
2460 MonoMethod *memcpy_method;
/*
 * This check breaks with spilled vars... need to handle it during verification anyway.
 * g_assert (klass && klass == src->klass && klass == dest->klass);
 */
/* Pick the native or managed size of the valuetype. */
2469 n = mono_class_native_size (klass, &align);
2471 n = mono_class_value_size (klass, &align);
/* Small copies are inlined when intrinsics are enabled. */
2473 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2474 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2475 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Otherwise call the managed memcpy(dest, src, n) helper. */
2479 EMIT_NEW_ICONST (cfg, iargs [2], n);
2481 memcpy_method = get_memcpy_method ();
2482 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return the corlib-provided String.memset(ptr, val, n) helper, cached in a
 * static after the first lookup; g_error aborts on an old corlib. */
2487 get_memset_method (void)
2489 static MonoMethod *memset_method = NULL;
2490 if (!memset_method) {
2491 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2493 g_error ("Old corlib found. Install a new one");
2495 return memset_method;
/* Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg (CIL `initobj`). Small sizes are inlined as stores; larger ones
 * call the corlib memset helper with value 0. */
2499 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2501 MonoInst *iargs [3];
2504 MonoMethod *memset_method;
2506 /* FIXME: Optimize this for the case when dest is an LDADDR */
2508 mono_class_init (klass);
2509 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized words. */
2511 if (n <= sizeof (gpointer) * 5) {
2512 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Fallback: memset (dest, 0, n) via the managed helper. */
2515 memset_method = get_memset_method ();
2517 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2518 EMIT_NEW_ICONST (cfg, iargs [2], n);
2519 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/* Emit IR that loads the runtime generic context (RGCTX) for @method.
 * Three cases, in order:
 *   1) generic method (MRGCTX needed): load it from the vtable variable;
 *   2) static method or valuetype class: load the vtable variable, and if the
 *      method is additionally inflated, dereference MRGCTX->class_vtable;
 *   3) otherwise: load `this` and fetch MonoObject->vtable from it.
 * Only valid under generic sharing (asserted below). */
2524 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2526 MonoInst *this = NULL;
2528 g_assert (cfg->generic_sharing_context);
/* Case 3 prerequisite: a non-static, non-valuetype method whose context does
 * not come from the method itself — `this` carries the vtable. */
2530 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2531 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2532 !method->klass->valuetype)
2533 EMIT_NEW_ARGLOAD (cfg, this, 0);
2535 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2536 MonoInst *mrgctx_loc, *mrgctx_var;
2539 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2541 mrgctx_loc = mono_get_vtable_var (cfg);
2542 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2545 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2546 MonoInst *vtable_loc, *vtable_var;
2550 vtable_loc = mono_get_vtable_var (cfg);
2551 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The vtable var actually holds an MRGCTX here; load its class_vtable field. */
2553 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2554 MonoInst *mrgctx_var = vtable_var;
2557 vtable_reg = alloc_preg (cfg);
2558 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2559 vtable_var->type = STACK_PTR;
/* Case 3: fetch the vtable from the `this` object loaded above. */
2565 int vtable_reg, res_reg;
2567 vtable_reg = alloc_preg (cfg);
2568 res_reg = alloc_preg (cfg);
2569 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Build a MonoJumpInfoRgctxEntry (mempool-allocated) describing an RGCTX slot
 * to fetch lazily: which method's context, whether an MRGCTX is needed, the
 * embedded patch describing the data, and the kind of info requested. */
2574 static MonoJumpInfoRgctxEntry *
2575 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2577 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2578 res->method = method;
2579 res->in_mrgctx = in_mrgctx;
/* The entry owns a nested MonoJumpInfo carrying the target data. */
2580 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2581 res->data->type = patch_type;
2582 res->data->data.target = patch_data;
2583 res->info_type = info_type;
/* Emit a call to the lazy RGCTX-fetch trampoline; @rgctx is passed as the
 * single argument and @entry describes which slot to resolve. */
2588 static inline MonoInst*
2589 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2591 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR that fetches class-related info (@rgctx_type, e.g. a vtable) for
 * @klass from the current method's runtime generic context. */
2595 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2596 MonoClass *klass, int rgctx_type)
2598 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2599 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2601 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR that fetches method-related info (@rgctx_type, e.g. compiled code
 * address) for @cmethod from the current method's runtime generic context. */
2605 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2606 MonoMethod *cmethod, int rgctx_type)
2608 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2609 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2611 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR that fetches field-related info (@rgctx_type) for @field from the
 * current method's runtime generic context. */
2615 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2616 MonoClassField *field, int rgctx_type)
2618 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2619 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2621 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit a runtime check that @obj is exactly an instance of @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for array stores).
 * The comparison strategy depends on the compilation mode:
 *   - MONO_OPT_SHARED: compare MonoVTable->klass against the class (constant
 *     or AOT class-const);
 *   - shared generic code: compare the vtable against one fetched from the
 *     RGCTX;
 *   - default: compare the vtable against the concrete vtable pointer. */
2625 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2627 int vtable_reg = alloc_preg (cfg);
2628 int context_used = 0;
2630 if (cfg->generic_sharing_context)
2631 context_used = mono_class_check_context_used (array_class);
2633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2635 if (cfg->opt & MONO_OPT_SHARED) {
2636 int class_reg = alloc_preg (cfg);
2637 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Under AOT the class pointer cannot be baked in as an immediate. */
2638 if (cfg->compile_aot) {
2639 int klass_reg = alloc_preg (cfg);
2640 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2645 } else if (context_used) {
2646 MonoInst *vtable_ins;
2648 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2651 if (cfg->compile_aot) {
2652 int vt_reg = alloc_preg (cfg);
2653 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2654 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
/* Any mismatch above branches to the exception throw. */
2660 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/* When --debug=casts is enabled, record the source class (read from the
 * object's vtable) and the target @klass into the JIT TLS area so a failed
 * cast can report "cannot cast from X to Y". No-op otherwise. */
2664 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2666 if (mini_get_debug_options ()->better_cast_details) {
2667 int to_klass_reg = alloc_preg (cfg);
2668 int vtable_reg = alloc_preg (cfg);
2669 int klass_reg = alloc_preg (cfg);
2670 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message ends with "\n." — the '.' after the newline
 * looks like a typo; confirm and drop the stray period. */
2673 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2677 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the "from" class of the attempted cast. */
2678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2682 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the TLS cast-details recorded by save_cast_details after a cast
 * succeeds; only the `class_cast_from` field needs zeroing. */
2688 reset_cast_details (MonoCompile *cfg)
2690 /* Reset the variables holding the cast details */
2691 if (mini_get_debug_options ()->better_cast_details) {
2692 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2694 MONO_ADD_INS (cfg->cbb, tls_get);
2695 /* It is enough to reset the from field */
2696 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2701 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2702 * generic code is generated.
2705 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Delegate to the Nullable<T>.Unbox helper method declared on @klass. */
2707 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2710 MonoInst *rgctx, *addr;
2712 /* FIXME: What if the class is shared? We might not
2713 have to get the address of the method from the
/* Shared path: fetch the method's code address and RGCTX, then do an
 * indirect (calli) call carrying the RGCTX argument. */
2715 addr = emit_get_rgctx_method (cfg, context_used, method,
2716 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2718 rgctx = emit_get_rgctx (cfg, method, context_used);
2720 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: a plain direct call. */
2722 return mono_emit_method_call (cfg, method, &val, NULL);
/* Emit the CIL `unbox` sequence for a (non-Nullable) valuetype @klass:
 * verify the boxed object's element class matches, then compute the address
 * of the payload (object + sizeof(MonoObject)). Shared generic code compares
 * against an element class fetched from the RGCTX. */
2727 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2731 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2732 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2733 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2734 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2736 obj_reg = sp [0]->dreg;
2737 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2738 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2740 /* FIXME: generics */
2741 g_assert (klass->rank == 0);
/* Arrays cannot be unboxed to a rank-0 valuetype: rank must be 0. */
2744 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2745 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic path: the expected element class comes from the RGCTX. */
2751 MonoInst *element_class;
2753 /* This assertion is from the unboxcast insn */
2754 g_assert (klass->rank == 0);
2756 element_class = emit_get_rgctx_klass (cfg, context_used,
2757 klass->element_class, MONO_RGCTX_INFO_KLASS);
2759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2760 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: record cast details for --debug=casts, then check the
 * element class directly. */
2762 save_cast_details (cfg, klass->element_class, obj_reg);
2763 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2764 reset_cast_details (cfg);
/* The unboxed value lives right after the object header. */
2767 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2768 MONO_ADD_INS (cfg->cbb, add);
2769 add->type = STACK_MP;
/* Emit object allocation for @klass (@for_box distinguishes box allocations).
 * Paths, in order: shared code (mono_object_new with explicit domain),
 * AOT out-of-line corlib fast path (token-indexed helper), GC managed
 * allocator when available, and finally the allocation function chosen by
 * mono_class_get_allocation_ftn. */
2776 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2778 MonoInst *iargs [2];
2781 if (cfg->opt & MONO_OPT_SHARED) {
2782 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2783 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2785 alloc_ftn = mono_object_new;
2786 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2787 /* This happens often in argument checking code, eg. throw new FooException... */
2788 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2789 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2790 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2792 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2793 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2796 if (managed_alloc) {
2797 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2798 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2800 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator wants the instance size in pointer-words
 * (rounded up) as its first argument. */
2802 guint32 lw = vtable->klass->instance_size;
2803 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2804 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2805 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2808 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2812 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Like handle_alloc, but for shared generic code where the vtable/class is
 * only known at runtime and is supplied in @data_inst rather than as a
 * compile-time constant. */
2816 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2819 MonoInst *iargs [2];
2820 MonoMethod *managed_alloc = NULL;
2824 FIXME: we cannot get managed_alloc here because we can't get
2825 the class's vtable (because it's not a closed class)
2827 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2828 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2831 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared path: mono_object_new (domain, klass-from-rgctx). */
2832 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2833 iargs [1] = data_inst;
2834 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME above), so this branch is
 * currently dead. */
2836 if (managed_alloc) {
2837 iargs [0] = data_inst;
2838 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Default: mono_object_new_specific (vtable-from-rgctx). */
2841 iargs [0] = data_inst;
2842 alloc_ftn = mono_object_new_specific;
2845 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Emit the CIL `box` sequence: Nullable<T> delegates to its Box helper,
 * otherwise allocate the box object and copy the value after the header. */
2849 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2851 MonoInst *alloc, *ins;
2853 if (mono_class_is_nullable (klass)) {
2854 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2855 return mono_emit_method_call (cfg, method, &val, NULL);
2858 alloc = handle_alloc (cfg, klass, TRUE);
/* Store the value payload at offset sizeof(MonoObject) inside the new box. */
2860 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Shared-generic variant of handle_box: the runtime vtable comes from
 * @data_inst, and the Nullable Box helper is invoked indirectly through an
 * address fetched from the RGCTX. */
2866 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2868 MonoInst *alloc, *ins;
2870 if (mono_class_is_nullable (klass)) {
2871 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2872 /* FIXME: What if the class is shared? We might not
2873 have to get the method address from the RGCTX. */
2874 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2875 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2876 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2878 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2880 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Copy the value payload into the freshly allocated box. */
2882 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Emit the CIL `castclass` check for @src against @klass.
 * null passes unchanged (branch to is_null_bb); interfaces use the interface
 * cast helper; sealed non-array classes (JIT, non-shared) take a fast exact
 * vtable/class compare; everything else goes through mini_emit_castclass.
 * Failure throws InvalidCastException. */
2889 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2891 MonoBasicBlock *is_null_bb;
2892 int obj_reg = src->dreg;
2893 int vtable_reg = alloc_preg (cfg);
2895 NEW_BBLOCK (cfg, is_null_bb);
/* null references always cast successfully. */
2897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2898 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Record from/to classes for --debug=casts diagnostics. */
2900 save_cast_details (cfg, klass, obj_reg);
2902 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2903 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2904 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2906 int klass_reg = alloc_preg (cfg);
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class fast path: identity compare is sufficient since no subclass
 * can exist. Disabled under AOT/shared where pointers can't be immediates. */
2910 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2911 /* the remoting code is broken, access the class for now */
2913 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2917 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2919 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2922 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2926 MONO_START_BB (cfg, is_null_bb);
2928 reset_cast_details (cfg);
/* Emit the CIL `isinst` test: result register gets the object on success and
 * 0 on failure. Control-flow shape: res := obj up-front; null short-circuits
 * to success (is_null_bb copies input to output); type-specific checks branch
 * to false_bb on mismatch; false_bb stores 0; all paths join at end_bb. */
2934 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2937 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2938 int obj_reg = src->dreg;
2939 int vtable_reg = alloc_preg (cfg);
2940 int res_reg = alloc_preg (cfg);
2942 NEW_BBLOCK (cfg, is_null_bb);
2943 NEW_BBLOCK (cfg, false_bb);
2944 NEW_BBLOCK (cfg, end_bb);
2946 /* Do the assignment at the beginning, so the other assignment can be if converted */
2947 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2948 ins->type = STACK_OBJ;
/* isinst on null yields null (treated as "success" with res = 0/null). */
2951 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2954 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2955 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2956 /* the is_null_bb target simply copies the input register to the output */
2957 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2959 int klass_reg = alloc_preg (cfg);
2961 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then the element (cast) class is compared,
 * with special-cased targets (object[], Enum-base[], Enum[], interface[]). */
2964 int rank_reg = alloc_preg (cfg);
2965 int eclass_reg = alloc_preg (cfg);
2967 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2970 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2972 if (klass->cast_class == mono_defaults.object_class) {
2973 int parent_reg = alloc_preg (cfg);
2974 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent))
2975 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2976 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2978 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2979 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2980 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2982 } else if (klass->cast_class == mono_defaults.enum_class) {
2983 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2984 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2985 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2986 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2988 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2989 /* Check that the object is a vector too */
2990 int bounds_reg = alloc_preg (cfg);
2991 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2996 /* the is_null_bb target simply copies the input register to the output */
2997 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2999 } else if (mono_class_is_nullable (klass)) {
3000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3001 /* the is_null_bb target simply copies the input register to the output */
3002 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path (JIT, non-shared): exact vtable/class compare. */
3004 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3005 /* the remoting code is broken, access the class for now */
3007 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3013 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3016 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3017 /* the is_null_bb target simply copies the input register to the output */
3018 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: result is 0. */
3023 MONO_START_BB (cfg, false_bb);
3025 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
3026 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3028 MONO_START_BB (cfg, is_null_bb);
3030 MONO_START_BB (cfg, end_bb);
/* Emit the remoting-aware isinst used with transparent proxies
 * (OP_CISINST). Result encoding is documented in the comment below.
 * Interfaces try a normal interface cast first; on failure, or for classes,
 * the code detects a transparent proxy (vtable->klass ==
 * transparent_proxy_class) and inspects custom_type_info / the remote
 * class's proxy_class to decide between "not an instance" (1) and
 * "undecidable proxy" (2). */
3036 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3038 /* This opcode takes as input an object reference and a class, and returns:
3039 0) if the object is an instance of the class,
3040 1) if the object is not instance of the class,
3041 2) if the object is a proxy whose type cannot be determined */
3044 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3045 int obj_reg = src->dreg;
3046 int dreg = alloc_ireg (cfg);
3048 int klass_reg = alloc_preg (cfg);
3050 NEW_BBLOCK (cfg, true_bb);
3051 NEW_BBLOCK (cfg, false_bb);
3052 NEW_BBLOCK (cfg, false2_bb);
3053 NEW_BBLOCK (cfg, end_bb);
3054 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3059 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3060 NEW_BBLOCK (cfg, interface_fail_bb);
3062 tmp_reg = alloc_preg (cfg);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3064 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3065 MONO_START_BB (cfg, interface_fail_bb);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* A non-proxy that failed the interface cast is simply not an instance. */
3068 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: type cannot be determined (result 2). */
3070 tmp_reg = alloc_preg (cfg);
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3072 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3073 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Class case: reload vtable->klass and test for a transparent proxy. */
3075 tmp_reg = alloc_preg (cfg);
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3079 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, the effective class is remote_class->proxy_class. */
3080 tmp_reg = alloc_preg (cfg);
3081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3084 tmp_reg = alloc_preg (cfg);
3085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3087 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: subclass check, else undecidable (2). */
3089 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3090 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3092 MONO_START_BB (cfg, no_proxy_bb);
3094 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Join: materialize result codes 1 / 2 / 0 into dreg. */
3097 MONO_START_BB (cfg, false_bb);
3099 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3100 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3102 MONO_START_BB (cfg, false2_bb);
3104 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3105 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3107 MONO_START_BB (cfg, true_bb);
3109 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3111 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST whose dreg is the join register; gives callers a MonoInst. */
3114 MONO_INST_NEW (cfg, ins, OP_ICONST);
3116 ins->type = STACK_I4;
/* Emit the remoting-aware castclass used with transparent proxies
 * (OP_CCASTCLASS). Result encoding is in the comment below; unlike
 * handle_cisinst, a definite mismatch throws InvalidCastException instead of
 * returning a failure code. */
3122 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3124 /* This opcode takes as input an object reference and a class, and returns:
3125 0) if the object is an instance of the class,
3126 1) if the object is a proxy whose type cannot be determined
3127 an InvalidCastException exception is thrown otherwhise*/
3130 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3131 int obj_reg = src->dreg;
3132 int dreg = alloc_ireg (cfg);
3133 int tmp_reg = alloc_preg (cfg);
3134 int klass_reg = alloc_preg (cfg);
3136 NEW_BBLOCK (cfg, end_bb);
3137 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts successfully (result 0). */
3139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3142 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3143 NEW_BBLOCK (cfg, interface_fail_bb);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3146 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3147 MONO_START_BB (cfg, interface_fail_bb);
3148 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface cast failed: only a transparent proxy may proceed; a non-proxy
 * throws (mini_emit_class_check throws on mismatch). */
3150 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info also throws; with it, result is 1. */
3152 tmp_reg = alloc_preg (cfg);
3153 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3154 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3155 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3157 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Class case: mirror of handle_cisinst's proxy detection. */
3161 NEW_BBLOCK (cfg, no_proxy_bb);
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3165 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3167 tmp_reg = alloc_preg (cfg);
3168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3169 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3171 tmp_reg = alloc_preg (cfg);
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3173 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3174 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: failed subclass check gives result 1. */
3176 NEW_BBLOCK (cfg, fail_1_bb);
3178 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3180 MONO_START_BB (cfg, fail_1_bb);
3182 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3183 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3185 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing castclass here throws. */
3187 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3190 MONO_START_BB (cfg, ok_result_bb);
3192 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3194 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST whose dreg is the join register; gives callers a MonoInst. */
3197 MONO_INST_NEW (cfg, ins, OP_ICONST);
3199 ins->type = STACK_I4;
/* Inline the body of mono_delegate_ctor: allocate the delegate object, set
 * its target/method fields, optionally wire up a per-domain code slot so the
 * delegate trampoline can find the compiled code, and install the delegate
 * invoke trampoline. Returns the new delegate instance. */
3204 static G_GNUC_UNUSED MonoInst*
3205 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3207 gpointer *trampoline;
3208 MonoInst *obj, *method_ins, *tramp_ins;
3212 obj = handle_alloc (cfg, klass, FALSE);
3214 /* Inline the contents of mono_delegate_ctor */
3216 /* Set target field */
3217 /* Optimize away setting of NULL target */
3218 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3219 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3221 /* Set method field */
3222 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3223 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3226 * To avoid looking up the compiled code belonging to the target method
3227 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3228 * store it, and we fill it after the method has been compiled.
/* Not applicable under AOT (no stable pointer) or for dynamic methods. */
3230 if (!cfg->compile_aot && !method->dynamic) {
3231 MonoInst *code_slot_ins;
/* Create-or-reuse the slot under the domain lock. */
3233 domain = mono_domain_get ();
3234 mono_domain_lock (domain);
3235 if (!domain_jit_info (domain)->method_code_hash)
3236 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3237 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3239 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3240 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3242 mono_domain_unlock (domain);
3244 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3245 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3248 /* Set invoke_impl field */
3249 trampoline = mono_create_delegate_trampoline (klass);
3250 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3251 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3253 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/* Emit a call to the vararg array-new icall for a multi-dimensional `newarr`
 * of the given @rank; @sp holds the dimension arguments. */
3259 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3261 MonoJitICallInfo *info;
3263 /* Need to register the icall so it gets an icall wrapper */
3264 info = mono_get_array_new_va_icall (rank);
/* The icall is vararg, so the whole method must support varargs. */
3266 cfg->flags |= MONO_CFG_HAS_VARARGS;
3268 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3269 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/* Materialize the GOT address into the got_var at method entry (once per
 * compile), and add a dummy use in the exit block so liveness analysis keeps
 * the variable alive for the whole method. No-op when there is no got_var or
 * it was already allocated. */
3273 mono_emit_load_got_addr (MonoCompile *cfg)
3275 MonoInst *getaddr, *dummy_use;
3277 if (!cfg->got_var || cfg->got_var_allocated)
3280 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3281 getaddr->dreg = cfg->got_var->dreg;
3283 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code, otherwise append. */
3284 if (cfg->bb_entry->code) {
3285 getaddr->next = cfg->bb_entry->code;
3286 cfg->bb_entry->code = getaddr;
3289 MONO_ADD_INS (cfg->bb_entry, getaddr);
3291 cfg->got_var_allocated = TRUE;
3294 * Add a dummy use to keep the got_var alive, since real uses might
3295 * only be generated by the back ends.
3296 * Add it to end_bblock, so the variable's lifetime covers the whole
3298 * It would be better to make the usage of the got var explicit in all
3299 * cases when the backend needs it (i.e. calls, throw etc.), so this
3300 * wouldn't be needed.
3302 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3303 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Decide whether @method may be inlined into the method being compiled.
 * Rejects: shared generic compiles, runtime/icall/noinline/synchronized/
 * pinvoke methods, MarshalByRef classes, methods with exception clauses or
 * no header, bodies over the size limit (MONO_INLINELIMIT env override),
 * classes whose cctor would have to run inside the inlined code, methods
 * with declarative security, and (on soft-float) anything touching R4. */
3307 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3309 MonoMethodHeader *header = mono_method_get_header (method);
3311 #ifdef MONO_ARCH_SOFT_FLOAT
3312 MonoMethodSignature *sig = mono_method_signature (method);
3316 if (cfg->generic_sharing_context)
/* With LMF ops, some internal-call/pinvoke methods are inlinable early. */
3319 #ifdef MONO_ARCH_HAVE_LMF_OPS
3320 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3321 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3322 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3326 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3327 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3328 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3329 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3330 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3331 (method->klass->marshalbyref) ||
3332 !header || header->num_clauses)
3335 /* also consider num_locals? */
3336 /* Do the size check early to avoid creating vtables */
3337 if (getenv ("MONO_INLINELIMIT")) {
3338 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3341 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3345 * if we can initialize the class of the method right away, we do,
3346 * otherwise we don't allow inlining if the class needs initialization,
3347 * since it would mean inserting a call to mono_runtime_class_init()
3348 * inside the inlined code
3350 if (!(cfg->opt & MONO_OPT_SHARED)) {
3351 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3352 if (cfg->run_cctors && method->klass->has_cctor) {
3353 if (!method->klass->runtime_info)
3354 /* No vtable created yet */
3356 vtable = mono_class_vtable (cfg->domain, method->klass);
3359 /* This makes so that inline cannot trigger */
3360 /* .cctors: too many apps depend on them */
3361 /* running with a specific order... */
3362 if (! vtable->initialized)
3364 mono_runtime_class_init (vtable);
3366 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3367 if (!method->klass->runtime_info)
3368 /* No vtable created yet */
3370 vtable = mono_class_vtable (cfg->domain, method->klass);
3373 if (!vtable->initialized)
3378 * If we're compiling for shared code
3379 * the cctor will need to be run at aot method load time, for example,
3380 * or at the end of the compilation of the inlining method.
3382 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3387 * CAS - do not inline methods with declarative security
3388 * Note: this has to be before any possible return TRUE;
3390 if (mono_method_has_declsec (method))
/* Soft-float: reject R4 in return type or any non-byref parameter. */
3393 #ifdef MONO_ARCH_SOFT_FLOAT
3395 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3397 for (i = 0; i < sig->param_count; ++i)
3398 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Return whether a static-field access from @method requires a class-init
 * check for @vtable's class. Already-initialized classes (outside AOT) and
 * BeforeFieldInit classes need none; an instance method of the same class
 * can rely on initialization having happened before the call. */
3406 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3408 if (vtable->initialized && !cfg->compile_aot)
3411 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3414 if (!mono_class_needs_cctor_run (vtable->klass, method))
3417 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3418 /* The initialization is already done before the method is called */
3425 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3429 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3431 mono_class_init (klass);
3432 size = mono_class_array_element_size (klass);
3434 mult_reg = alloc_preg (cfg);
3435 array_reg = arr->dreg;
3436 index_reg = index->dreg;
3438 #if SIZEOF_VOID_P == 8
3439 /* The array reg is 64 bits but the index reg is only 32 */
3440 index2_reg = alloc_preg (cfg);
3441 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3443 index2_reg = index_reg;
3446 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3448 #if defined(__i386__) || defined(__x86_64__)
3449 if (size == 1 || size == 2 || size == 4 || size == 8) {
3450 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3452 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3453 ins->type = STACK_PTR;
3459 add_reg = alloc_preg (cfg);
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3463 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3464 ins->type = STACK_PTR;
3465 MONO_ADD_INS (cfg->cbb, ins);
3470 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3472 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3474 int bounds_reg = alloc_preg (cfg);
3475 int add_reg = alloc_preg (cfg);
3476 int mult_reg = alloc_preg (cfg);
3477 int mult2_reg = alloc_preg (cfg);
3478 int low1_reg = alloc_preg (cfg);
3479 int low2_reg = alloc_preg (cfg);
3480 int high1_reg = alloc_preg (cfg);
3481 int high2_reg = alloc_preg (cfg);
3482 int realidx1_reg = alloc_preg (cfg);
3483 int realidx2_reg = alloc_preg (cfg);
3484 int sum_reg = alloc_preg (cfg);
3489 mono_class_init (klass);
3490 size = mono_class_array_element_size (klass);
3492 index1 = index_ins1->dreg;
3493 index2 = index_ins2->dreg;
3495 /* range checking */
3496 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3497 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3500 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3501 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3503 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3505 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3508 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3509 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3511 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3512 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3513 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3515 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3516 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3519 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3521 ins->type = STACK_MP;
3523 MONO_ADD_INS (cfg->cbb, ins);
3530 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3534 MonoMethod *addr_method;
3537 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3540 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3542 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3543 /* emit_ldelema_2 depends on OP_LMUL */
3544 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3545 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3549 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3550 addr_method = mono_marshal_get_array_address (rank, element_size);
3551 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3557 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3559 MonoInst *ins = NULL;
3561 static MonoClass *runtime_helpers_class = NULL;
3562 if (! runtime_helpers_class)
3563 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3564 "System.Runtime.CompilerServices", "RuntimeHelpers");
3566 if (cmethod->klass == mono_defaults.string_class) {
3567 if (strcmp (cmethod->name, "get_Chars") == 0) {
3568 int dreg = alloc_ireg (cfg);
3569 int index_reg = alloc_preg (cfg);
3570 int mult_reg = alloc_preg (cfg);
3571 int add_reg = alloc_preg (cfg);
3573 #if SIZEOF_VOID_P == 8
3574 /* The array reg is 64 bits but the index reg is only 32 */
3575 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3577 index_reg = args [1]->dreg;
3579 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3581 #if defined(__i386__) || defined(__x86_64__)
3582 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3583 add_reg = ins->dreg;
3584 /* Avoid a warning */
3586 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3590 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3591 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3592 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3594 type_from_op (ins, NULL, NULL);
3596 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3597 int dreg = alloc_ireg (cfg);
3598 /* Decompose later to allow more optimizations */
3599 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3600 ins->type = STACK_I4;
3601 cfg->cbb->has_array_access = TRUE;
3602 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3605 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3606 int mult_reg = alloc_preg (cfg);
3607 int add_reg = alloc_preg (cfg);
3609 /* The corlib functions check for oob already. */
3610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3611 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3612 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3615 } else if (cmethod->klass == mono_defaults.object_class) {
3617 if (strcmp (cmethod->name, "GetType") == 0) {
3618 int dreg = alloc_preg (cfg);
3619 int vt_reg = alloc_preg (cfg);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3621 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3622 type_from_op (ins, NULL, NULL);
3625 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3626 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3627 int dreg = alloc_ireg (cfg);
3628 int t1 = alloc_ireg (cfg);
3630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3631 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3632 ins->type = STACK_I4;
3636 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3637 MONO_INST_NEW (cfg, ins, OP_NOP);
3638 MONO_ADD_INS (cfg->cbb, ins);
3642 } else if (cmethod->klass == mono_defaults.array_class) {
3643 if (cmethod->name [0] != 'g')
3646 if (strcmp (cmethod->name, "get_Rank") == 0) {
3647 int dreg = alloc_ireg (cfg);
3648 int vtable_reg = alloc_preg (cfg);
3649 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3650 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3651 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3652 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3653 type_from_op (ins, NULL, NULL);
3656 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3657 int dreg = alloc_ireg (cfg);
3659 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3660 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3661 type_from_op (ins, NULL, NULL);
3666 } else if (cmethod->klass == runtime_helpers_class) {
3668 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3669 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3673 } else if (cmethod->klass == mono_defaults.thread_class) {
3674 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3675 ins->dreg = alloc_preg (cfg);
3676 ins->type = STACK_OBJ;
3677 MONO_ADD_INS (cfg->cbb, ins);
3679 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3680 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3681 MONO_ADD_INS (cfg->cbb, ins);
3683 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3684 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3685 MONO_ADD_INS (cfg->cbb, ins);
3688 } else if (mini_class_is_system_array (cmethod->klass) &&
3689 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3690 MonoInst *addr, *store, *load;
3691 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3693 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3694 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3695 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3697 } else if (cmethod->klass->image == mono_defaults.corlib &&
3698 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3699 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3702 #if SIZEOF_VOID_P == 8
3703 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3704 /* 64 bit reads are already atomic */
3705 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3706 ins->dreg = mono_alloc_preg (cfg);
3707 ins->inst_basereg = args [0]->dreg;
3708 ins->inst_offset = 0;
3709 MONO_ADD_INS (cfg->cbb, ins);
3713 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3714 if (strcmp (cmethod->name, "Increment") == 0) {
3715 MonoInst *ins_iconst;
3718 if (fsig->params [0]->type == MONO_TYPE_I4)
3719 opcode = OP_ATOMIC_ADD_NEW_I4;
3720 #if SIZEOF_VOID_P == 8
3721 else if (fsig->params [0]->type == MONO_TYPE_I8)
3722 opcode = OP_ATOMIC_ADD_NEW_I8;
3725 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3726 ins_iconst->inst_c0 = 1;
3727 ins_iconst->dreg = mono_alloc_ireg (cfg);
3728 MONO_ADD_INS (cfg->cbb, ins_iconst);
3730 MONO_INST_NEW (cfg, ins, opcode);
3731 ins->dreg = mono_alloc_ireg (cfg);
3732 ins->inst_basereg = args [0]->dreg;
3733 ins->inst_offset = 0;
3734 ins->sreg2 = ins_iconst->dreg;
3735 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3736 MONO_ADD_INS (cfg->cbb, ins);
3738 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3739 MonoInst *ins_iconst;
3742 if (fsig->params [0]->type == MONO_TYPE_I4)
3743 opcode = OP_ATOMIC_ADD_NEW_I4;
3744 #if SIZEOF_VOID_P == 8
3745 else if (fsig->params [0]->type == MONO_TYPE_I8)
3746 opcode = OP_ATOMIC_ADD_NEW_I8;
3749 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3750 ins_iconst->inst_c0 = -1;
3751 ins_iconst->dreg = mono_alloc_ireg (cfg);
3752 MONO_ADD_INS (cfg->cbb, ins_iconst);
3754 MONO_INST_NEW (cfg, ins, opcode);
3755 ins->dreg = mono_alloc_ireg (cfg);
3756 ins->inst_basereg = args [0]->dreg;
3757 ins->inst_offset = 0;
3758 ins->sreg2 = ins_iconst->dreg;
3759 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3760 MONO_ADD_INS (cfg->cbb, ins);
3762 } else if (strcmp (cmethod->name, "Add") == 0) {
3765 if (fsig->params [0]->type == MONO_TYPE_I4)
3766 opcode = OP_ATOMIC_ADD_NEW_I4;
3767 #if SIZEOF_VOID_P == 8
3768 else if (fsig->params [0]->type == MONO_TYPE_I8)
3769 opcode = OP_ATOMIC_ADD_NEW_I8;
3773 MONO_INST_NEW (cfg, ins, opcode);
3774 ins->dreg = mono_alloc_ireg (cfg);
3775 ins->inst_basereg = args [0]->dreg;
3776 ins->inst_offset = 0;
3777 ins->sreg2 = args [1]->dreg;
3778 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3779 MONO_ADD_INS (cfg->cbb, ins);
3782 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3784 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3785 if (strcmp (cmethod->name, "Exchange") == 0) {
3788 if (fsig->params [0]->type == MONO_TYPE_I4)
3789 opcode = OP_ATOMIC_EXCHANGE_I4;
3790 #if SIZEOF_VOID_P == 8
3791 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3792 (fsig->params [0]->type == MONO_TYPE_I) ||
3793 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3794 opcode = OP_ATOMIC_EXCHANGE_I8;
3796 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3797 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3798 opcode = OP_ATOMIC_EXCHANGE_I4;
3803 MONO_INST_NEW (cfg, ins, opcode);
3804 ins->dreg = mono_alloc_ireg (cfg);
3805 ins->inst_basereg = args [0]->dreg;
3806 ins->inst_offset = 0;
3807 ins->sreg2 = args [1]->dreg;
3808 MONO_ADD_INS (cfg->cbb, ins);
3810 switch (fsig->params [0]->type) {
3812 ins->type = STACK_I4;
3816 ins->type = STACK_I8;
3818 case MONO_TYPE_OBJECT:
3819 ins->type = STACK_OBJ;
3822 g_assert_not_reached ();
3825 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3827 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3829 * Can't implement CompareExchange methods this way since they have
3830 * three arguments. We can implement one of the common cases, where the new
3831 * value is a constant.
3833 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3834 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3835 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3836 ins->dreg = alloc_ireg (cfg);
3837 ins->sreg1 = args [0]->dreg;
3838 ins->sreg2 = args [1]->dreg;
3839 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3840 ins->type = STACK_I4;
3841 MONO_ADD_INS (cfg->cbb, ins);
3843 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3845 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3849 } else if (cmethod->klass->image == mono_defaults.corlib) {
3850 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3851 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3852 MONO_INST_NEW (cfg, ins, OP_BREAK);
3853 MONO_ADD_INS (cfg->cbb, ins);
3856 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3857 && strcmp (cmethod->klass->name, "Environment") == 0) {
3858 #ifdef PLATFORM_WIN32
3859 EMIT_NEW_ICONST (cfg, ins, 1);
3861 EMIT_NEW_ICONST (cfg, ins, 0);
3865 } else if (cmethod->klass == mono_defaults.math_class) {
3867 * There is general branches code for Min/Max, but it does not work for
3869 * http://everything2.com/?node_id=1051618
3873 #ifdef MONO_ARCH_SIMD_INTRINSICS
3874 if (cfg->opt & MONO_OPT_SIMD) {
3875 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3881 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3885 * This entry point could be used later for arbitrary method
3888 inline static MonoInst*
3889 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3890 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3892 if (method->klass == mono_defaults.string_class) {
3893 /* managed string allocation support */
3894 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3895 MonoInst *iargs [2];
3896 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3897 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3900 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3901 iargs [1] = args [0];
3902 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
3909 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3911 MonoInst *store, *temp;
3914 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3915 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3918 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3919 * would be different than the MonoInst's used to represent arguments, and
3920 * the ldelema implementation can't deal with that.
3921 * Solution: When ldelema is used on an inline argument, create a var for
3922 * it, emit ldelema on that var, and emit the saving code below in
3923 * inline_method () if needed.
3925 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3926 cfg->args [i] = temp;
3927 /* This uses cfg->args [i] which is set by the preceeding line */
3928 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3929 store->cil_code = sp [0]->cil_code;
3934 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3935 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3937 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3939 check_inline_called_method_name_limit (MonoMethod *called_method)
3942 static char *limit = NULL;
3944 if (limit == NULL) {
3945 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3947 if (limit_string != NULL)
3948 limit = limit_string;
3950 limit = (char *) "";
3953 if (limit [0] != '\0') {
3954 char *called_method_name = mono_method_full_name (called_method, TRUE);
3956 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3957 g_free (called_method_name);
3959 //return (strncmp_result <= 0);
3960 return (strncmp_result == 0);
3967 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3969 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3972 static char *limit = NULL;
3974 if (limit == NULL) {
3975 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3976 if (limit_string != NULL) {
3977 limit = limit_string;
3979 limit = (char *) "";
3983 if (limit [0] != '\0') {
3984 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3986 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3987 g_free (caller_method_name);
3989 //return (strncmp_result <= 0);
3990 return (strncmp_result == 0);
3998 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3999 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4001 MonoInst *ins, *rvar = NULL;
4002 MonoMethodHeader *cheader;
4003 MonoBasicBlock *ebblock, *sbblock;
4005 MonoMethod *prev_inlined_method;
4006 MonoInst **prev_locals, **prev_args;
4007 MonoType **prev_arg_types;
4008 guint prev_real_offset;
4009 GHashTable *prev_cbb_hash;
4010 MonoBasicBlock **prev_cil_offset_to_bb;
4011 MonoBasicBlock *prev_cbb;
4012 unsigned char* prev_cil_start;
4013 guint32 prev_cil_offset_to_bb_len;
4014 MonoMethod *prev_current_method;
4015 MonoGenericContext *prev_generic_context;
4017 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4019 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4020 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4023 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4024 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4028 if (cfg->verbose_level > 2)
4029 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4031 if (!cmethod->inline_info) {
4032 mono_jit_stats.inlineable_methods++;
4033 cmethod->inline_info = 1;
4035 /* allocate space to store the return value */
4036 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4037 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4040 /* allocate local variables */
4041 cheader = mono_method_get_header (cmethod);
4042 prev_locals = cfg->locals;
4043 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4044 for (i = 0; i < cheader->num_locals; ++i)
4045 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4047 /* allocate start and end blocks */
4048 /* This is needed so if the inline is aborted, we can clean up */
4049 NEW_BBLOCK (cfg, sbblock);
4050 sbblock->real_offset = real_offset;
4052 NEW_BBLOCK (cfg, ebblock);
4053 ebblock->block_num = cfg->num_bblocks++;
4054 ebblock->real_offset = real_offset;
4056 prev_args = cfg->args;
4057 prev_arg_types = cfg->arg_types;
4058 prev_inlined_method = cfg->inlined_method;
4059 cfg->inlined_method = cmethod;
4060 cfg->ret_var_set = FALSE;
4061 prev_real_offset = cfg->real_offset;
4062 prev_cbb_hash = cfg->cbb_hash;
4063 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4064 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4065 prev_cil_start = cfg->cil_start;
4066 prev_cbb = cfg->cbb;
4067 prev_current_method = cfg->current_method;
4068 prev_generic_context = cfg->generic_context;
4070 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4072 cfg->inlined_method = prev_inlined_method;
4073 cfg->real_offset = prev_real_offset;
4074 cfg->cbb_hash = prev_cbb_hash;
4075 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4076 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4077 cfg->cil_start = prev_cil_start;
4078 cfg->locals = prev_locals;
4079 cfg->args = prev_args;
4080 cfg->arg_types = prev_arg_types;
4081 cfg->current_method = prev_current_method;
4082 cfg->generic_context = prev_generic_context;
4084 if ((costs >= 0 && costs < 60) || inline_allways) {
4085 if (cfg->verbose_level > 2)
4086 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4088 mono_jit_stats.inlined_methods++;
4090 /* always add some code to avoid block split failures */
4091 MONO_INST_NEW (cfg, ins, OP_NOP);
4092 MONO_ADD_INS (prev_cbb, ins);
4094 prev_cbb->next_bb = sbblock;
4095 link_bblock (cfg, prev_cbb, sbblock);
4098 * Get rid of the begin and end bblocks if possible to aid local
4101 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4103 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4104 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4106 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4107 MonoBasicBlock *prev = ebblock->in_bb [0];
4108 mono_merge_basic_blocks (cfg, prev, ebblock);
4116 * If the inlined method contains only a throw, then the ret var is not
4117 * set, so set it to a dummy value.
4119 if (!cfg->ret_var_set) {
4120 static double r8_0 = 0.0;
4122 switch (rvar->type) {
4124 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4127 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4132 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4135 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4136 ins->type = STACK_R8;
4137 ins->inst_p0 = (void*)&r8_0;
4138 ins->dreg = rvar->dreg;
4139 MONO_ADD_INS (cfg->cbb, ins);
4142 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4145 g_assert_not_reached ();
4149 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4154 if (cfg->verbose_level > 2)
4155 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4156 cfg->exception_type = MONO_EXCEPTION_NONE;
4157 mono_loader_clear_error ();
4159 /* This gets rid of the newly added bblocks */
4160 cfg->cbb = prev_cbb;
4166 * Some of these comments may well be out-of-date.
4167 * Design decisions: we do a single pass over the IL code (and we do bblock
4168 * splitting/merging in the few cases when it's required: a back jump to an IL
4169 * address that was not already seen as bblock starting point).
4170 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4171 * Complex operations are decomposed in simpler ones right away. We need to let the
4172 * arch-specific code peek and poke inside this process somehow (except when the
4173 * optimizations can take advantage of the full semantic info of coarse opcodes).
4174 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4175 * MonoInst->opcode initially is the IL opcode or some simplification of that
4176 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4177 * opcode with value bigger than OP_LAST.
4178 * At this point the IR can be handed over to an interpreter, a dumb code generator
4179 * or to the optimizing code generator that will translate it to SSA form.
4181 * Profiling directed optimizations.
4182 * We may compile by default with few or no optimizations and instrument the code
4183 * or the user may indicate what methods to optimize the most either in a config file
4184 * or through repeated runs where the compiler applies offline the optimizations to
4185 * each method and then decides if it was worth it.
4188 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4189 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4190 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4191 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4192 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4193 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4194 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4195 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4197 /* offset from br.s -> br like opcodes */
4198 #define BIG_BRANCH_OFFSET 13
4201 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4203 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4205 return b == NULL || b == bb;
4209 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4211 unsigned char *ip = start;
4212 unsigned char *target;
4215 MonoBasicBlock *bblock;
4216 const MonoOpcode *opcode;
4219 cli_addr = ip - start;
4220 i = mono_opcode_value ((const guint8 **)&ip, end);
4223 opcode = &mono_opcodes [i];
4224 switch (opcode->argument) {
4225 case MonoInlineNone:
4228 case MonoInlineString:
4229 case MonoInlineType:
4230 case MonoInlineField:
4231 case MonoInlineMethod:
4234 case MonoShortInlineR:
4241 case MonoShortInlineVar:
4242 case MonoShortInlineI:
4245 case MonoShortInlineBrTarget:
4246 target = start + cli_addr + 2 + (signed char)ip [1];
4247 GET_BBLOCK (cfg, bblock, target);
4250 GET_BBLOCK (cfg, bblock, ip);
4252 case MonoInlineBrTarget:
4253 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4254 GET_BBLOCK (cfg, bblock, target);
4257 GET_BBLOCK (cfg, bblock, ip);
4259 case MonoInlineSwitch: {
4260 guint32 n = read32 (ip + 1);
4263 cli_addr += 5 + 4 * n;
4264 target = start + cli_addr;
4265 GET_BBLOCK (cfg, bblock, target);
4267 for (j = 0; j < n; ++j) {
4268 target = start + cli_addr + (gint32)read32 (ip);
4269 GET_BBLOCK (cfg, bblock, target);
4279 g_assert_not_reached ();
4282 if (i == CEE_THROW) {
4283 unsigned char *bb_start = ip - 1;
4285 /* Find the start of the bblock containing the throw */
4287 while ((bb_start >= start) && !bblock) {
4288 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4292 bblock->out_of_line = 1;
4301 static inline MonoMethod *
4302 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4306 if (m->wrapper_type != MONO_WRAPPER_NONE)
4307 return mono_method_get_wrapper_data (m, token);
4309 method = mono_get_method_full (m->klass->image, token, klass, context);
4314 static inline MonoMethod *
4315 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4317 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4319 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4325 static inline MonoClass*
4326 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4330 if (method->wrapper_type != MONO_WRAPPER_NONE)
4331 klass = mono_method_get_wrapper_data (method, token);
4333 klass = mono_class_get_full (method->klass->image, token, context);
4335 mono_class_init (klass);
4340 * Returns TRUE if the JIT should abort inlining because "callee"
4341 * is influenced by security attributes.
4344 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4348 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4352 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4353 if (result == MONO_JIT_SECURITY_OK)
4356 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4357 /* Generate code to throw a SecurityException before the actual call/link */
4358 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4361 NEW_ICONST (cfg, args [0], 4);
4362 NEW_METHODCONST (cfg, args [1], caller);
4363 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4364 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4365 /* don't hide previous results */
4366 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4367 cfg->exception_data = result;
4375 method_access_exception (void)
4377 static MonoMethod *method = NULL;
4380 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4381 method = mono_class_get_method_from_name (secman->securitymanager,
4382 "MethodAccessException", 2);
4389 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4390 MonoBasicBlock *bblock, unsigned char *ip)
4392 MonoMethod *thrower = method_access_exception ();
4395 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4396 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4397 mono_emit_method_call (cfg, thrower, args, NULL);
4401 verification_exception (void)
4403 static MonoMethod *method = NULL;
4406 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4407 method = mono_class_get_method_from_name (secman->securitymanager,
4408 "VerificationException", 0);
4415 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4417 MonoMethod *thrower = verification_exception ();
4419 mono_emit_method_call (cfg, thrower, NULL, NULL);
4423 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4424 MonoBasicBlock *bblock, unsigned char *ip)
4426 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4427 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4428 gboolean is_safe = TRUE;
4430 if (!(caller_level >= callee_level ||
4431 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4432 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4437 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4441 method_is_safe (MonoMethod *method)
4444 if (strcmp (method->name, "unsafeMethod") == 0)
4451 * Check that the IL instructions at ip are the array initialization
4452 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical array-initialization IL that follows a
 * newarr — dup; ldtoken <field>; call RuntimeHelpers::InitializeArray — and,
 * when it matches, return a pointer to the field's static data so the JIT
 * can inline the initialization (size returned through OUT_SIZE).  Returns
 * NULL when the pattern does not match or the element type cannot be
 * handled directly.  For AOT compilation the field RVA (not a data pointer)
 * is returned, since the lookup is redone at load time.
 * NOTE(review): several lines are elided in this excerpt; comments describe
 * only the visible logic.
 */
4455 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4458 * newarr[System.Int32]
4460 * ldtoken field valuetype ...
4461 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token) */
4463 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
/* Token of the called method (after the CEE_CALL opcode byte) */
4464 guint32 token = read32 (ip + 7);
/* Token of the RVA-backed field loaded by ldtoken */
4465 guint32 field_token = read32 (ip + 2);
4466 guint32 field_index = field_token & 0xffffff;
4468 const char *data_ptr;
4470 MonoMethod *cmethod;
4471 MonoClass *dummy_class;
4472 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4478 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must be exactly corlib's RuntimeHelpers.InitializeArray */
4481 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose in-memory layout matches the metadata blob
 * can be copied directly; multi-byte types need a little-endian host. */
4483 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4484 case MONO_TYPE_BOOLEAN:
4488 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4489 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4490 case MONO_TYPE_CHAR:
4500 return NULL; /* stupid ARM FP swapped format */
/* Bail out if the computed blob size exceeds the field's declared size */
4510 if (size > mono_type_size (field->type, &dummy_align))
4513 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4514 if (!method->klass->image->dynamic) {
4515 field_index = read32 (ip + 2) & 0xffffff;
/* Resolve the field's RVA and map it to an in-memory pointer */
4516 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4517 data_ptr = mono_image_rva_map (method->klass->image, rva);
4518 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4519 /* for aot code we do the lookup on load */
4520 if (aot && data_ptr)
4521 return GUINT_TO_POINTER (rva);
4523 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) image: the data lives on the field itself */
4525 data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for malformed IL at IP.  The
 * message embeds the full method name and either a disassembly of the
 * offending instruction or a note that the body is empty.
 */
4533 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4535 char *method_fname = mono_method_full_name (method, TRUE);
4538 if (mono_method_get_header (method)->code_size == 0)
4539 method_code = g_strdup ("method body is empty.");
4541 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4542 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4543 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* The message owns copies; free the temporaries */
4544 g_free (method_fname);
4545 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG.  The exception_ptr
 * slot is registered as a GC root before the assignment so the object
 * cannot be collected while the compile fails out.
 */
4549 set_exception_object (MonoCompile *cfg, MonoException *exception)
4551 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4552 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4553 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving type variables
 * through the generic sharing context first so that shared generic code
 * sees the concrete (basic) type rather than the open type parameter.
 */
4557 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4561 if (cfg->generic_sharing_context)
4562 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4564 type = &klass->byval_arg;
4565 return MONO_TYPE_IS_REFERENCE (type);
4569 * mono_decompose_array_access_opts:
4571 * Decompose array access opcodes.
4572 * This should be in decompose.c, but it emits calls so it has to stay here until
4573 * the old JIT is gone.
/*
 * Lowers high-level array opcodes (array length, bounds checks, newarr,
 * string length) into loads, arch-specific checks and icalls.  Works by
 * emitting replacement code into a scratch bblock and then splicing it in
 * place of the original instruction with mono_replace_ins ().
 * NOTE(review): some lines (cases, braces, loop tails) are elided in this
 * excerpt; comments describe only the visible statements.
 */
4576 mono_decompose_array_access_opts (MonoCompile *cfg)
4578 MonoBasicBlock *bb, *first_bb;
4581 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4582 * can be executed anytime. It should be run before decompose_long
4586 * Create a dummy bblock and emit code into it so we can use the normal
4587 * code generation macros.
4589 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4590 first_bb = cfg->cbb;
4592 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4594 MonoInst *prev = NULL;
4596 MonoInst *iargs [3];
/* Skip bblocks with nothing to decompose */
4599 if (!bb->has_array_access)
4602 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock for this bb */
4604 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4610 for (ins = bb->code; ins; ins = ins->next) {
4611 switch (ins->opcode) {
/* Array length: load MonoArray::max_length from the array object */
4613 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4614 G_STRUCT_OFFSET (MonoArray, max_length));
4615 MONO_ADD_INS (cfg->cbb, dest);
4617 case OP_BOUNDS_CHECK:
/* Delegate to the per-architecture bounds-check sequence */
4618 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr with MONO_OPT_SHARED: go through the generic icall that
 * takes (domain, element class, length) */
4621 if (cfg->opt & MONO_OPT_SHARED) {
4622 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4623 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4624 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4625 iargs [2]->dreg = ins->sreg1;
4627 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4628 dest->dreg = ins->dreg;
/* Unshared: resolve the vtable now and call the faster
 * vtable-specific allocator */
4630 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4633 NEW_VTABLECONST (cfg, iargs [0], vtable);
4634 MONO_ADD_INS (cfg->cbb, iargs [0]);
4635 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4636 iargs [1]->dreg = ins->sreg1;
4638 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4639 dest->dreg = ins->dreg;
/* String length: load MonoString::length */
4643 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4644 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4645 MONO_ADD_INS (cfg->cbb, dest);
4651 g_assert (cfg->cbb == first_bb);
4653 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4654 /* Replace the original instruction with the new code sequence */
4656 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction */
4657 first_bb->code = first_bb->last_ins = NULL;
4658 first_bb->in_count = first_bb->out_count = 0;
4659 cfg->cbb = first_bb;
4666 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4676 #ifdef MONO_ARCH_SOFT_FLOAT
4679 * mono_decompose_soft_float:
4681 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4682 * similar to long support on 32 bit platforms. 32 bit float values require special
4683 * handling when used as locals, arguments, and in calls.
4684 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Lowers every fp opcode into integer/long equivalents or icalls:
 * r8 constants become i8 constants, fp moves/loads/stores become long
 * moves/loads/stores, r4 memory accesses go through mono_fstore_r4 /
 * mono_fload_r4 icalls, fp calls become long calls, and fp compares and
 * branches become emulation-icall + integer compare sequences.  Like
 * mono_decompose_array_access_opts, code is emitted into a scratch bblock
 * and spliced in with mono_replace_ins ().
 * NOTE(review): many lines (case labels, braces) are elided in this
 * excerpt; comments describe only the visible statements.
 */
4687 mono_decompose_soft_float (MonoCompile *cfg)
4689 MonoBasicBlock *bb, *first_bb;
4692 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4696 * Create a dummy bblock and emit code into it so we can use the normal
4697 * code generation macros.
4699 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4700 first_bb = cfg->cbb;
4702 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4704 MonoInst *prev = NULL;
4707 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* Reset the scratch bblock for this bb */
4709 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4715 for (ins = bb->code; ins; ins = ins->next) {
4716 const char *spec = INS_INFO (ins->opcode);
4718 /* Most fp operations are handled automatically by opcode emulation */
4720 switch (ins->opcode) {
/* r8 constant: reinterpret the double bits as an i8 constant
 * (d is presumably a double/gint64 union — elided here) */
4723 d.vald = *(double*)ins->inst_p0;
4724 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4729 /* We load the r8 value */
4730 d.vald = *(float*)ins->inst_p0;
4731 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp reg-reg move: the double lives in a vreg pair, so use LMOVE */
4735 ins->opcode = OP_LMOVE;
/* Move one 32 bit half of the fp vreg pair (sreg1+1 / sreg1+2
 * address the two halves) */
4738 ins->opcode = OP_MOVE;
4739 ins->sreg1 = ins->sreg1 + 1;
4742 ins->opcode = OP_MOVE;
4743 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: set both halves via SETLRET */
4746 int reg = ins->sreg1;
4748 ins->opcode = OP_SETLRET;
4750 ins->sreg1 = reg + 1;
4751 ins->sreg2 = reg + 2;
/* r8 memory accesses are plain 64 bit integer accesses */
4754 case OP_LOADR8_MEMBASE:
4755 ins->opcode = OP_LOADI8_MEMBASE;
4757 case OP_STORER8_MEMBASE_REG:
4758 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: needs an r8->r4 conversion, done by the
 * mono_fstore_r4 icall (value, destination address) */
4760 case OP_STORER4_MEMBASE_REG: {
4761 MonoInst *iargs [2];
4764 /* Arg 1 is the double value */
4765 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4766 iargs [0]->dreg = ins->sreg1;
4768 /* Arg 2 is the address to store to */
4769 addr_reg = mono_alloc_preg (cfg);
4770 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4771 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: mono_fload_r4 reads the float and widens it to r8 */
4775 case OP_LOADR4_MEMBASE: {
4776 MonoInst *iargs [1];
4780 addr_reg = mono_alloc_preg (cfg);
4781 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4782 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4783 conv->dreg = ins->dreg;
4788 case OP_FCALL_MEMBASE: {
4789 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4: rewrite as an int-returning call, then
 * convert the returned bits to r8 via mono_fload_r4 */
4790 if (call->signature->ret->type == MONO_TYPE_R4) {
4791 MonoCallInst *call2;
4792 MonoInst *iargs [1];
4795 /* Convert the call into a call returning an int */
4796 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4797 memcpy (call2, call, sizeof (MonoCallInst));
4798 switch (ins->opcode) {
4800 call2->inst.opcode = OP_CALL;
4803 call2->inst.opcode = OP_CALL_REG;
4805 case OP_FCALL_MEMBASE:
4806 call2->inst.opcode = OP_CALL_MEMBASE;
4809 g_assert_not_reached ();
4811 call2->inst.dreg = mono_alloc_ireg (cfg);
4812 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4814 /* FIXME: Optimize this */
4816 /* Emit an r4->r8 conversion */
4817 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4818 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4819 conv->dreg = ins->dreg;
/* Calls returning r8: just turn the fp call into a long call */
4821 switch (ins->opcode) {
4823 ins->opcode = OP_LCALL;
4826 ins->opcode = OP_LCALL_REG;
4828 case OP_FCALL_MEMBASE:
4829 ins->opcode = OP_LCALL_MEMBASE;
4832 g_assert_not_reached ();
/* fp compare followed by fp branch */
4838 MonoJitICallInfo *info;
4839 MonoInst *iargs [2];
4840 MonoInst *call, *cmp, *br;
4842 /* Convert fcompare+fbcc to icall+icompare+beq */
/* Look up the emulation icall for the *branch* opcode */
4844 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4847 /* Create dummy MonoInst's for the arguments */
4848 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4849 iargs [0]->dreg = ins->sreg1;
4850 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4851 iargs [1]->dreg = ins->sreg2;
4853 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Branch on the icall's boolean result (immediate operand elided) */
4855 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4856 cmp->sreg1 = call->dreg;
4858 MONO_ADD_INS (cfg->cbb, cmp);
4860 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4861 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4862 br->inst_true_bb = ins->next->inst_true_bb;
4863 br->inst_false_bb = ins->next->inst_false_bb;
4864 MONO_ADD_INS (cfg->cbb, br);
4866 /* The call sequence might include fp ins */
4869 /* Skip fbcc or fccc */
4870 NULLIFY_INS (ins->next);
/* fp compare-and-set (fceq/fcgt/...) */
4878 MonoJitICallInfo *info;
4879 MonoInst *iargs [2];
4882 /* Convert fccc to icall+icompare+iceq */
4884 info = mono_find_jit_opcode_emulation (ins->opcode);
4887 /* Create dummy MonoInst's for the arguments */
4888 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4889 iargs [0]->dreg = ins->sreg1;
4890 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4891 iargs [1]->dreg = ins->sreg2;
4893 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Set dreg to 1 iff the emulation icall returned 1 */
4895 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4896 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4898 /* The call sequence might include fp ins */
/* Sanity check: after lowering, no fp vregs may remain */
4903 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4904 mono_print_ins (ins);
4905 g_assert_not_reached ();
4910 g_assert (cfg->cbb == first_bb);
4912 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4913 /* Replace the original instruction with the new code sequence */
4915 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction */
4916 first_bb->code = first_bb->last_ins = NULL;
4917 first_bb->in_count = first_bb->out_count = 0;
4918 cfg->cbb = first_bb;
4925 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, decompose them now */
4928 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store for a CIL stloc of stack value SP[0] into local N.  When
 * the store is a plain register move and the value on the stack is a
 * constant, the move is optimized away by retargeting the constant's dreg
 * directly to the local's vreg.
 */
4934 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4937 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4938 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4939 /* Optimize reg-reg moves away */
4941 * Can't optimize other opcodes, since sp[0] might point to
4942 * the last ins of a decomposed opcode.
4944 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit an explicit local store */
4946 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4951 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for ldloca/ldloca.s: when the address is only taken so that
 * the next instruction can INITOBJ it, emit the initialization directly
 * (NULL pconst for reference types, vzero for value types) and return the
 * IP past the matched sequence so the caller skips both instructions.
 * Returns NULL (presumably — elided here) when the pattern does not match.
 * SIZE selects the ldloca encoding (1 = ldloca.s, 2 = ldloca); the read16
 * below belongs to the wide form.
 */
4954 static inline unsigned char *
4955 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4964 local = read16 (ip + 2);
/* Match "prefix1 initobj <token>" immediately following, in the same bb */
4968 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
4969 gboolean skip = FALSE;
4971 /* From the INITOBJ case */
4972 token = read32 (ip + 2);
4973 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
4974 CHECK_TYPELOAD (klass);
/* Reference types (including shared-generic ones): store NULL */
4975 if (generic_class_is_reference_type (cfg, klass)) {
4976 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
4977 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
4978 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* Value types: zero the whole struct */
4979 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
4980 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
4993 * mono_method_to_ir: translates IL into basic blocks containing trees
4996 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4997 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4998 guint inline_offset, gboolean is_virtual_call)
5000 MonoInst *ins, **sp, **stack_start;
5001 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5002 MonoMethod *cmethod, *method_definition;
5003 MonoInst **arg_array;
5004 MonoMethodHeader *header;
5006 guint32 token, ins_flag;
5008 MonoClass *constrained_call = NULL;
5009 unsigned char *ip, *end, *target, *err_pos;
5010 static double r8_0 = 0.0;
5011 MonoMethodSignature *sig;
5012 MonoGenericContext *generic_context = NULL;
5013 MonoGenericContainer *generic_container = NULL;
5014 MonoType **param_types;
5015 int i, n, start_new_bblock, dreg;
5016 int num_calls = 0, inline_costs = 0;
5017 int breakpoint_id = 0;
5019 MonoBoolean security, pinvoke;
5020 MonoSecurityManager* secman = NULL;
5021 MonoDeclSecurityActions actions;
5022 GSList *class_inits = NULL;
5023 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5026 /* serialization and xdomain stuff may need access to private fields and methods */
5027 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5028 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5029 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5030 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5031 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5032 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5034 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5036 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5037 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5038 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5039 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5041 image = method->klass->image;
5042 header = mono_method_get_header (method);
5043 generic_container = mono_method_get_generic_container (method);
5044 sig = mono_method_signature (method);
5045 num_args = sig->hasthis + sig->param_count;
5046 ip = (unsigned char*)header->code;
5047 cfg->cil_start = ip;
5048 end = ip + header->code_size;
5049 mono_jit_stats.cil_code_size += header->code_size;
5051 method_definition = method;
5052 while (method_definition->is_inflated) {
5053 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5054 method_definition = imethod->declaring;
5057 /* SkipVerification is not allowed if core-clr is enabled */
5058 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5060 dont_verify_stloc = TRUE;
5063 if (!dont_verify && mini_method_verify (cfg, method_definition))
5064 goto exception_exit;
5066 if (sig->is_inflated)
5067 generic_context = mono_method_get_context (method);
5068 else if (generic_container)
5069 generic_context = &generic_container->context;
5070 cfg->generic_context = generic_context;
5072 if (!cfg->generic_sharing_context)
5073 g_assert (!sig->has_type_parameters);
5075 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5076 g_assert (method->is_inflated);
5077 g_assert (mono_method_get_context (method)->method_inst);
5079 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5080 g_assert (sig->generic_param_count);
5082 if (cfg->method == method) {
5083 cfg->real_offset = 0;
5085 cfg->real_offset = inline_offset;
5088 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5089 cfg->cil_offset_to_bb_len = header->code_size;
5091 cfg->current_method = method;
5093 if (cfg->verbose_level > 2)
5094 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5096 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5098 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5099 for (n = 0; n < sig->param_count; ++n)
5100 param_types [n + sig->hasthis] = sig->params [n];
5101 cfg->arg_types = param_types;
5103 dont_inline = g_list_prepend (dont_inline, method);
5104 if (cfg->method == method) {
5106 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5107 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5110 NEW_BBLOCK (cfg, start_bblock);
5111 cfg->bb_entry = start_bblock;
5112 start_bblock->cil_code = NULL;
5113 start_bblock->cil_length = 0;
5116 NEW_BBLOCK (cfg, end_bblock);
5117 cfg->bb_exit = end_bblock;
5118 end_bblock->cil_code = NULL;
5119 end_bblock->cil_length = 0;
5120 g_assert (cfg->num_bblocks == 2);
5122 arg_array = cfg->args;
5124 if (header->num_clauses) {
5125 cfg->spvars = g_hash_table_new (NULL, NULL);
5126 cfg->exvars = g_hash_table_new (NULL, NULL);
5128 /* handle exception clauses */
5129 for (i = 0; i < header->num_clauses; ++i) {
5130 MonoBasicBlock *try_bb;
5131 MonoExceptionClause *clause = &header->clauses [i];
5132 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5133 try_bb->real_offset = clause->try_offset;
5134 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5135 tblock->real_offset = clause->handler_offset;
5136 tblock->flags |= BB_EXCEPTION_HANDLER;
5138 link_bblock (cfg, try_bb, tblock);
5140 if (*(ip + clause->handler_offset) == CEE_POP)
5141 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5143 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5144 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5145 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5146 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5147 MONO_ADD_INS (tblock, ins);
5149 /* todo: is a fault block unsafe to optimize? */
5150 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5151 tblock->flags |= BB_EXCEPTION_UNSAFE;
5155 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5157 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5159 /* catch and filter blocks get the exception object on the stack */
5160 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5161 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5162 MonoInst *dummy_use;
5164 /* mostly like handle_stack_args (), but just sets the input args */
5165 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5166 tblock->in_scount = 1;
5167 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5168 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5171 * Add a dummy use for the exvar so its liveness info will be
5175 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5177 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5178 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5179 tblock->real_offset = clause->data.filter_offset;
5180 tblock->in_scount = 1;
5181 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5182 /* The filter block shares the exvar with the handler block */
5183 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5184 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5185 MONO_ADD_INS (tblock, ins);
5189 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5190 clause->data.catch_class &&
5191 cfg->generic_sharing_context &&
5192 mono_class_check_context_used (clause->data.catch_class)) {
5193 if (mono_method_get_context (method)->method_inst)
5194 GENERIC_SHARING_FAILURE (CEE_NOP);
5197 * In shared generic code with catch
5198 * clauses containing type variables
5199 * the exception handling code has to
5200 * be able to get to the rgctx.
5201 * Therefore we have to make sure that
5202 * the vtable/mrgctx argument (for
5203 * static or generic methods) or the
5204 * "this" argument (for non-static
5205 * methods) are live.
5207 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5208 mini_method_get_context (method)->method_inst ||
5209 method->klass->valuetype) {
5210 mono_get_vtable_var (cfg);
5212 MonoInst *dummy_use;
5214 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5219 arg_array = alloca (sizeof (MonoInst *) * num_args);
5220 cfg->cbb = start_bblock;
5221 cfg->args = arg_array;
5222 mono_save_args (cfg, sig, inline_args);
5225 /* FIRST CODE BLOCK */
5226 NEW_BBLOCK (cfg, bblock);
5227 bblock->cil_code = ip;
5231 ADD_BBLOCK (cfg, bblock);
5233 if (cfg->method == method) {
5234 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5235 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5236 MONO_INST_NEW (cfg, ins, OP_BREAK);
5237 MONO_ADD_INS (bblock, ins);
5241 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5242 secman = mono_security_manager_get_methods ();
5244 security = (secman && mono_method_has_declsec (method));
5245 /* at this point having security doesn't mean we have any code to generate */
5246 if (security && (cfg->method == method)) {
5247 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5248 * And we do not want to enter the next section (with allocation) if we
5249 * have nothing to generate */
5250 security = mono_declsec_get_demands (method, &actions);
5253 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5254 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5256 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5257 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5258 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5260 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5261 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5265 mono_custom_attrs_free (custom);
5268 custom = mono_custom_attrs_from_class (wrapped->klass);
5269 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5273 mono_custom_attrs_free (custom);
5276 /* not a P/Invoke after all */
5281 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5282 /* we use a separate basic block for the initialization code */
5283 NEW_BBLOCK (cfg, init_localsbb);
5284 cfg->bb_init = init_localsbb;
5285 init_localsbb->real_offset = cfg->real_offset;
5286 start_bblock->next_bb = init_localsbb;
5287 init_localsbb->next_bb = bblock;
5288 link_bblock (cfg, start_bblock, init_localsbb);
5289 link_bblock (cfg, init_localsbb, bblock);
5291 cfg->cbb = init_localsbb;
5293 start_bblock->next_bb = bblock;
5294 link_bblock (cfg, start_bblock, bblock);
5297 /* at this point we know, if security is TRUE, that some code needs to be generated */
5298 if (security && (cfg->method == method)) {
5301 mono_jit_stats.cas_demand_generation++;
5303 if (actions.demand.blob) {
5304 /* Add code for SecurityAction.Demand */
5305 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5306 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5307 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5308 mono_emit_method_call (cfg, secman->demand, args, NULL);
5310 if (actions.noncasdemand.blob) {
5311 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5312 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5313 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5314 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5315 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5316 mono_emit_method_call (cfg, secman->demand, args, NULL);
5318 if (actions.demandchoice.blob) {
5319 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5320 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5321 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5322 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5323 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5327 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5329 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5332 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5333 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5334 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5335 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5336 if (!(method->klass && method->klass->image &&
5337 mono_security_core_clr_is_platform_image (method->klass->image))) {
5338 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5342 if (!method_is_safe (method))
5343 emit_throw_verification_exception (cfg, bblock, ip);
5346 if (header->code_size == 0)
5349 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5354 if (cfg->method == method)
5355 mono_debug_init_method (cfg, bblock, breakpoint_id);
5357 for (n = 0; n < header->num_locals; ++n) {
5358 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5363 /* add a check for this != NULL to inlined methods */
5364 if (is_virtual_call) {
5367 NEW_ARGLOAD (cfg, arg_ins, 0);
5368 MONO_ADD_INS (cfg->cbb, arg_ins);
5369 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5370 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5371 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5374 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5375 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5378 start_new_bblock = 0;
5382 if (cfg->method == method)
5383 cfg->real_offset = ip - header->code;
5385 cfg->real_offset = inline_offset;
5390 if (start_new_bblock) {
5391 bblock->cil_length = ip - bblock->cil_code;
5392 if (start_new_bblock == 2) {
5393 g_assert (ip == tblock->cil_code);
5395 GET_BBLOCK (cfg, tblock, ip);
5397 bblock->next_bb = tblock;
5400 start_new_bblock = 0;
5401 for (i = 0; i < bblock->in_scount; ++i) {
5402 if (cfg->verbose_level > 3)
5403 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5404 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5408 g_slist_free (class_inits);
5411 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5412 link_bblock (cfg, bblock, tblock);
5413 if (sp != stack_start) {
5414 handle_stack_args (cfg, stack_start, sp - stack_start);
5416 CHECK_UNVERIFIABLE (cfg);
5418 bblock->next_bb = tblock;
5421 for (i = 0; i < bblock->in_scount; ++i) {
5422 if (cfg->verbose_level > 3)
5423 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5424 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5427 g_slist_free (class_inits);
5432 bblock->real_offset = cfg->real_offset;
5434 if ((cfg->method == method) && cfg->coverage_info) {
5435 guint32 cil_offset = ip - header->code;
5436 cfg->coverage_info->data [cil_offset].cil_code = ip;
5438 /* TODO: Use an increment here */
5439 #if defined(__i386__)
5440 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5441 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5443 MONO_ADD_INS (cfg->cbb, ins);
5445 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5446 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5450 if (cfg->verbose_level > 3)
5451 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5456 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5458 MONO_ADD_INS (bblock, ins);
5464 CHECK_STACK_OVF (1);
5465 n = (*ip)-CEE_LDARG_0;
5467 EMIT_NEW_ARGLOAD (cfg, ins, n);
5475 CHECK_STACK_OVF (1);
5476 n = (*ip)-CEE_LDLOC_0;
5478 EMIT_NEW_LOCLOAD (cfg, ins, n);
5487 n = (*ip)-CEE_STLOC_0;
5490 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5492 emit_stloc_ir (cfg, sp, header, n);
5499 CHECK_STACK_OVF (1);
5502 EMIT_NEW_ARGLOAD (cfg, ins, n);
5508 CHECK_STACK_OVF (1);
5511 NEW_ARGLOADA (cfg, ins, n);
5512 MONO_ADD_INS (cfg->cbb, ins);
5522 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5524 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5529 CHECK_STACK_OVF (1);
5532 EMIT_NEW_LOCLOAD (cfg, ins, n);
5536 case CEE_LDLOCA_S: {
5537 unsigned char *tmp_ip;
5539 CHECK_STACK_OVF (1);
5540 CHECK_LOCAL (ip [1]);
5542 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5548 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5557 CHECK_LOCAL (ip [1]);
5558 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5560 emit_stloc_ir (cfg, sp, header, ip [1]);
5565 CHECK_STACK_OVF (1);
5566 EMIT_NEW_PCONST (cfg, ins, NULL);
5567 ins->type = STACK_OBJ;
5572 CHECK_STACK_OVF (1);
5573 EMIT_NEW_ICONST (cfg, ins, -1);
5586 CHECK_STACK_OVF (1);
5587 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5593 CHECK_STACK_OVF (1);
5595 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5601 CHECK_STACK_OVF (1);
5602 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5608 CHECK_STACK_OVF (1);
5609 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5610 ins->type = STACK_I8;
5611 ins->dreg = alloc_dreg (cfg, STACK_I8);
5613 ins->inst_l = (gint64)read64 (ip);
5614 MONO_ADD_INS (bblock, ins);
5620 /* FIXME: we should really allocate this only late in the compilation process */
5621 mono_domain_lock (cfg->domain);
5622 f = mono_domain_alloc (cfg->domain, sizeof (float));
5623 mono_domain_unlock (cfg->domain);
5625 CHECK_STACK_OVF (1);
5626 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5627 ins->type = STACK_R8;
5628 ins->dreg = alloc_dreg (cfg, STACK_R8);
5632 MONO_ADD_INS (bblock, ins);
5640 /* FIXME: we should really allocate this only late in the compilation process */
5641 mono_domain_lock (cfg->domain);
5642 d = mono_domain_alloc (cfg->domain, sizeof (double));
5643 mono_domain_unlock (cfg->domain);
5645 CHECK_STACK_OVF (1);
5646 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5647 ins->type = STACK_R8;
5648 ins->dreg = alloc_dreg (cfg, STACK_R8);
5652 MONO_ADD_INS (bblock, ins);
5659 MonoInst *temp, *store;
5661 CHECK_STACK_OVF (1);
5665 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5666 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5668 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5671 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5684 if (sp [0]->type == STACK_R8)
5685 /* we need to pop the value from the x86 FP stack */
5686 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5693 if (stack_start != sp)
5695 token = read32 (ip + 1);
5696 /* FIXME: check the signature matches */
5697 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5702 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5703 GENERIC_SHARING_FAILURE (CEE_JMP);
5705 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5706 if (check_linkdemand (cfg, method, cmethod))
5708 CHECK_CFG_EXCEPTION;
5713 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5716 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5718 /* Handle tail calls similarly to calls */
5719 n = fsig->param_count + fsig->hasthis;
5721 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5722 call->method = cmethod;
5723 call->tail_call = TRUE;
5724 call->signature = mono_method_signature (cmethod);
5725 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5726 call->inst.inst_p0 = cmethod;
5727 for (i = 0; i < n; ++i)
5728 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5730 mono_arch_emit_call (cfg, call);
5731 MONO_ADD_INS (bblock, (MonoInst*)call);
5734 for (i = 0; i < num_args; ++i)
5735 /* Prevent arguments from being optimized away */
5736 arg_array [i]->flags |= MONO_INST_VOLATILE;
5738 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5739 ins = (MonoInst*)call;
5740 ins->inst_p0 = cmethod;
5741 MONO_ADD_INS (bblock, ins);
5745 start_new_bblock = 1;
5750 case CEE_CALLVIRT: {
5751 MonoInst *addr = NULL;
5752 MonoMethodSignature *fsig = NULL;
5754 int virtual = *ip == CEE_CALLVIRT;
5755 int calli = *ip == CEE_CALLI;
5756 gboolean pass_imt_from_rgctx = FALSE;
5757 MonoInst *imt_arg = NULL;
5758 gboolean pass_vtable = FALSE;
5759 gboolean pass_mrgctx = FALSE;
5760 MonoInst *vtable_arg = NULL;
5761 gboolean check_this = FALSE;
5764 token = read32 (ip + 1);
5771 if (method->wrapper_type != MONO_WRAPPER_NONE)
5772 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5774 fsig = mono_metadata_parse_signature (image, token);
5776 n = fsig->param_count + fsig->hasthis;
5778 MonoMethod *cil_method;
5780 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5781 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5782 cil_method = cmethod;
5783 } else if (constrained_call) {
5784 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5786 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5787 cil_method = cmethod;
5792 if (!dont_verify && !cfg->skip_visibility) {
5793 MonoMethod *target_method = cil_method;
5794 if (method->is_inflated) {
5795 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5797 if (!mono_method_can_access_method (method_definition, target_method) &&
5798 !mono_method_can_access_method (method, cil_method))
5799 METHOD_ACCESS_FAILURE;
5802 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5803 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5805 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5806 /* MS.NET seems to silently convert this to a callvirt */
5809 if (!cmethod->klass->inited)
5810 if (!mono_class_init (cmethod->klass))
5813 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5814 mini_class_is_system_array (cmethod->klass)) {
5815 array_rank = cmethod->klass->rank;
5816 fsig = mono_method_signature (cmethod);
5818 if (mono_method_signature (cmethod)->pinvoke) {
5819 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5820 check_for_pending_exc, FALSE);
5821 fsig = mono_method_signature (wrapper);
5822 } else if (constrained_call) {
5823 fsig = mono_method_signature (cmethod);
5825 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5829 mono_save_token_info (cfg, image, token, cmethod);
5831 n = fsig->param_count + fsig->hasthis;
5833 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5834 if (check_linkdemand (cfg, method, cmethod))
5836 CHECK_CFG_EXCEPTION;
5839 if (cmethod->string_ctor)
5840 g_assert_not_reached ();
5843 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5846 if (!cfg->generic_sharing_context && cmethod)
5847 g_assert (!mono_method_check_context_used (cmethod));
5851 //g_assert (!virtual || fsig->hasthis);
5855 if (constrained_call) {
5857 * We have the `constrained.' prefix opcode.
5859 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5863 * The type parameter is instantiated as a valuetype,
5864 * but that type doesn't override the method we're
5865 * calling, so we need to box `this'.
5867 dreg = alloc_dreg (cfg, STACK_VTYPE);
5868 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5869 ins->klass = constrained_call;
5870 sp [0] = handle_box (cfg, ins, constrained_call);
5871 } else if (!constrained_call->valuetype) {
5872 int dreg = alloc_preg (cfg);
5875 * The type parameter is instantiated as a reference
5876 * type. We have a managed pointer on the stack, so
5877 * we need to dereference it here.
5879 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5880 ins->type = STACK_OBJ;
5882 } else if (cmethod->klass->valuetype)
5884 constrained_call = NULL;
5887 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5891 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5892 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5893 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5894 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5895 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5898 * Pass vtable iff target method might
5899 * be shared, which means that sharing
5900 * is enabled for its class and its
5901 * context is sharable (and it's not a
5904 if (sharing_enabled && context_sharable &&
5905 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5909 if (cmethod && mini_method_get_context (cmethod) &&
5910 mini_method_get_context (cmethod)->method_inst) {
5911 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5912 MonoGenericContext *context = mini_method_get_context (cmethod);
5913 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5915 g_assert (!pass_vtable);
5917 if (sharing_enabled && context_sharable)
5921 if (cfg->generic_sharing_context && cmethod) {
5922 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5924 context_used = mono_method_check_context_used (cmethod);
5926 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5927 /* Generic method interface
5928 calls are resolved via a
5929 helper function and don't
5931 if (!cmethod_context || !cmethod_context->method_inst)
5932 pass_imt_from_rgctx = TRUE;
5936 * If a shared method calls another
5937 * shared method then the caller must
5938 * have a generic sharing context
5939 * because the magic trampoline
5940 * requires it. FIXME: We shouldn't
5941 * have to force the vtable/mrgctx
5942 * variable here. Instead there
5943 * should be a flag in the cfg to
5944 * request a generic sharing context.
5947 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5948 mono_get_vtable_var (cfg);
5953 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5955 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5957 CHECK_TYPELOAD (cmethod->klass);
5958 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5963 g_assert (!vtable_arg);
5966 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5968 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5971 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5972 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5979 if (pass_imt_from_rgctx) {
5980 g_assert (!pass_vtable);
5983 imt_arg = emit_get_rgctx_method (cfg, context_used,
5984 cmethod, MONO_RGCTX_INFO_METHOD);
5990 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5991 check->sreg1 = sp [0]->dreg;
5992 MONO_ADD_INS (cfg->cbb, check);
5995 /* Calling virtual generic methods */
5996 if (cmethod && virtual &&
5997 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5998 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5999 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6000 mono_method_signature (cmethod)->generic_param_count) {
6001 MonoInst *this_temp, *this_arg_temp, *store;
6002 MonoInst *iargs [4];
6004 g_assert (mono_method_signature (cmethod)->is_inflated);
6006 /* Prevent inlining of methods that contain indirect calls */
6009 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6010 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6011 g_assert (!imt_arg);
6013 imt_arg = emit_get_rgctx_method (cfg, context_used,
6014 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6017 g_assert (cmethod->is_inflated);
6018 EMIT_NEW_PCONST (cfg, imt_arg,
6019 ((MonoMethodInflated*)cmethod)->context.method_inst);
6021 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6025 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6026 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6027 MONO_ADD_INS (bblock, store);
6029 /* FIXME: This should be a managed pointer */
6030 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6032 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6034 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6035 cmethod, MONO_RGCTX_INFO_METHOD);
6036 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6037 addr = mono_emit_jit_icall (cfg,
6038 mono_helper_compile_generic_method, iargs);
6040 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6041 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6042 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6045 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6047 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6050 if (!MONO_TYPE_IS_VOID (fsig->ret))
6059 /* FIXME: runtime generic context pointer for jumps? */
6060 /* FIXME: handle this for generic sharing eventually */
6061 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6062 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6065 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6068 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6069 call->tail_call = TRUE;
6070 call->method = cmethod;
6071 call->signature = mono_method_signature (cmethod);
6074 /* Handle tail calls similarly to calls */
6075 call->inst.opcode = OP_TAILCALL;
6077 mono_arch_emit_call (cfg, call);
6080 * We implement tail calls by storing the actual arguments into the
6081 * argument variables, then emitting a CEE_JMP.
6083 for (i = 0; i < n; ++i) {
6084 /* Prevent argument from being register allocated */
6085 arg_array [i]->flags |= MONO_INST_VOLATILE;
6086 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6090 ins = (MonoInst*)call;
6091 ins->inst_p0 = cmethod;
6092 ins->inst_p1 = arg_array [0];
6093 MONO_ADD_INS (bblock, ins);
6094 link_bblock (cfg, bblock, end_bblock);
6095 start_new_bblock = 1;
6096 /* skip CEE_RET as well */
6102 /* Conversion to a JIT intrinsic */
6103 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6104 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6105 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6116 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6117 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6118 mono_method_check_inlining (cfg, cmethod) &&
6119 !g_list_find (dont_inline, cmethod)) {
6121 gboolean allways = FALSE;
6123 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6124 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6125 /* Prevent inlining of methods that call wrappers */
6127 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6131 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6133 cfg->real_offset += 5;
6136 if (!MONO_TYPE_IS_VOID (fsig->ret))
6137 /* *sp is already set by inline_method */
6140 inline_costs += costs;
6146 inline_costs += 10 * num_calls++;
6148 /* Tail recursion elimination */
6149 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6150 gboolean has_vtargs = FALSE;
6153 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6156 /* keep it simple */
6157 for (i = fsig->param_count - 1; i >= 0; i--) {
6158 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6163 for (i = 0; i < n; ++i)
6164 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6165 MONO_INST_NEW (cfg, ins, OP_BR);
6166 MONO_ADD_INS (bblock, ins);
6167 tblock = start_bblock->out_bb [0];
6168 link_bblock (cfg, bblock, tblock);
6169 ins->inst_target_bb = tblock;
6170 start_new_bblock = 1;
6172 /* skip the CEE_RET, too */
6173 if (ip_in_bb (cfg, bblock, ip + 5))
6183 /* Generic sharing */
6184 /* FIXME: only do this for generic methods if
6185 they are not shared! */
6186 if (context_used && !imt_arg && !array_rank &&
6187 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6188 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6189 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6190 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6193 g_assert (cfg->generic_sharing_context && cmethod);
6197 * We are compiling a call to a
6198 * generic method from shared code,
6199 * which means that we have to look up
6200 * the method in the rgctx and do an
6203 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6206 /* Indirect calls */
6208 g_assert (!imt_arg);
6210 if (*ip == CEE_CALL)
6211 g_assert (context_used);
6212 else if (*ip == CEE_CALLI)
6213 g_assert (!vtable_arg);
6215 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6216 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6218 /* Prevent inlining of methods with indirect calls */
6222 #ifdef MONO_ARCH_RGCTX_REG
6224 int rgctx_reg = mono_alloc_preg (cfg);
6226 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6227 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6228 call = (MonoCallInst*)ins;
6229 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6230 cfg->uses_rgctx_reg = TRUE;
6235 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6237 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6238 if (fsig->pinvoke && !fsig->ret->byref) {
6242 * Native code might return non register sized integers
6243 * without initializing the upper bits.
6245 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6246 case OP_LOADI1_MEMBASE:
6247 widen_op = OP_ICONV_TO_I1;
6249 case OP_LOADU1_MEMBASE:
6250 widen_op = OP_ICONV_TO_U1;
6252 case OP_LOADI2_MEMBASE:
6253 widen_op = OP_ICONV_TO_I2;
6255 case OP_LOADU2_MEMBASE:
6256 widen_op = OP_ICONV_TO_U2;
6262 if (widen_op != -1) {
6263 int dreg = alloc_preg (cfg);
6266 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6267 widen->type = ins->type;
6284 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6285 if (sp [fsig->param_count]->type == STACK_OBJ) {
6286 MonoInst *iargs [2];
6289 iargs [1] = sp [fsig->param_count];
6291 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6294 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6295 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6296 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6297 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6299 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6302 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6303 if (!cmethod->klass->element_class->valuetype && !readonly)
6304 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6307 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6310 g_assert_not_reached ();
6318 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6320 if (!MONO_TYPE_IS_VOID (fsig->ret))
6331 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6333 } else if (imt_arg) {
6334 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6336 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6339 if (!MONO_TYPE_IS_VOID (fsig->ret))
6347 if (cfg->method != method) {
6348 /* return from inlined method */
6350 * If in_count == 0, that means the ret is unreachable due to
6351 * being preceeded by a throw. In that case, inline_method () will
6352 * handle setting the return value
6353 * (test case: test_0_inline_throw ()).
6355 if (return_var && cfg->cbb->in_count) {
6359 //g_assert (returnvar != -1);
6360 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6361 cfg->ret_var_set = TRUE;
6365 MonoType *ret_type = mono_method_signature (method)->ret;
6367 g_assert (!return_var);
6370 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6373 if (!cfg->vret_addr) {
6376 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6378 EMIT_NEW_RETLOADA (cfg, ret_addr);
6380 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6381 ins->klass = mono_class_from_mono_type (ret_type);
6384 #ifdef MONO_ARCH_SOFT_FLOAT
6385 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6386 MonoInst *iargs [1];
6390 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6391 mono_arch_emit_setret (cfg, method, conv);
6393 mono_arch_emit_setret (cfg, method, *sp);
6396 mono_arch_emit_setret (cfg, method, *sp);
6401 if (sp != stack_start)
6403 MONO_INST_NEW (cfg, ins, OP_BR);
6405 ins->inst_target_bb = end_bblock;
6406 MONO_ADD_INS (bblock, ins);
6407 link_bblock (cfg, bblock, end_bblock);
6408 start_new_bblock = 1;
6412 MONO_INST_NEW (cfg, ins, OP_BR);
6414 target = ip + 1 + (signed char)(*ip);
6416 GET_BBLOCK (cfg, tblock, target);
6417 link_bblock (cfg, bblock, tblock);
6418 ins->inst_target_bb = tblock;
6419 if (sp != stack_start) {
6420 handle_stack_args (cfg, stack_start, sp - stack_start);
6422 CHECK_UNVERIFIABLE (cfg);
6424 MONO_ADD_INS (bblock, ins);
6425 start_new_bblock = 1;
6426 inline_costs += BRANCH_COST;
6440 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6442 target = ip + 1 + *(signed char*)ip;
6448 inline_costs += BRANCH_COST;
6452 MONO_INST_NEW (cfg, ins, OP_BR);
6455 target = ip + 4 + (gint32)read32(ip);
6457 GET_BBLOCK (cfg, tblock, target);
6458 link_bblock (cfg, bblock, tblock);
6459 ins->inst_target_bb = tblock;
6460 if (sp != stack_start) {
6461 handle_stack_args (cfg, stack_start, sp - stack_start);
6463 CHECK_UNVERIFIABLE (cfg);
6466 MONO_ADD_INS (bblock, ins);
6468 start_new_bblock = 1;
6469 inline_costs += BRANCH_COST;
6476 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6477 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6478 guint32 opsize = is_short ? 1 : 4;
6480 CHECK_OPSIZE (opsize);
6482 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6485 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6490 GET_BBLOCK (cfg, tblock, target);
6491 link_bblock (cfg, bblock, tblock);
6492 GET_BBLOCK (cfg, tblock, ip);
6493 link_bblock (cfg, bblock, tblock);
6495 if (sp != stack_start) {
6496 handle_stack_args (cfg, stack_start, sp - stack_start);
6497 CHECK_UNVERIFIABLE (cfg);
6500 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6501 cmp->sreg1 = sp [0]->dreg;
6502 type_from_op (cmp, sp [0], NULL);
6505 #if SIZEOF_VOID_P == 4
6506 if (cmp->opcode == OP_LCOMPARE_IMM) {
6507 /* Convert it to OP_LCOMPARE */
6508 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6509 ins->type = STACK_I8;
6510 ins->dreg = alloc_dreg (cfg, STACK_I8);
6512 MONO_ADD_INS (bblock, ins);
6513 cmp->opcode = OP_LCOMPARE;
6514 cmp->sreg2 = ins->dreg;
6517 MONO_ADD_INS (bblock, cmp);
6519 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6520 type_from_op (ins, sp [0], NULL);
6521 MONO_ADD_INS (bblock, ins);
6522 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6523 GET_BBLOCK (cfg, tblock, target);
6524 ins->inst_true_bb = tblock;
6525 GET_BBLOCK (cfg, tblock, ip);
6526 ins->inst_false_bb = tblock;
6527 start_new_bblock = 2;
6530 inline_costs += BRANCH_COST;
6545 MONO_INST_NEW (cfg, ins, *ip);
6547 target = ip + 4 + (gint32)read32(ip);
6553 inline_costs += BRANCH_COST;
6557 MonoBasicBlock **targets;
6558 MonoBasicBlock *default_bblock;
6559 MonoJumpInfoBBTable *table;
6561 int offset_reg = alloc_preg (cfg);
6562 int target_reg = alloc_preg (cfg);
6563 int table_reg = alloc_preg (cfg);
6564 int sum_reg = alloc_preg (cfg);
6569 n = read32 (ip + 1);
6572 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6576 CHECK_OPSIZE (n * sizeof (guint32));
6577 target = ip + n * sizeof (guint32);
6579 GET_BBLOCK (cfg, default_bblock, target);
6581 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6582 for (i = 0; i < n; ++i) {
6583 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6584 targets [i] = tblock;
6588 if (sp != stack_start) {
6590 * Link the current bb with the targets as well, so handle_stack_args
6591 * will set their in_stack correctly.
6593 link_bblock (cfg, bblock, default_bblock);
6594 for (i = 0; i < n; ++i)
6595 link_bblock (cfg, bblock, targets [i]);
6597 handle_stack_args (cfg, stack_start, sp - stack_start);
6599 CHECK_UNVERIFIABLE (cfg);
6602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6606 for (i = 0; i < n; ++i)
6607 link_bblock (cfg, bblock, targets [i]);
6609 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6610 table->table = targets;
6611 table->table_size = n;
6614 /* ARM implements SWITCH statements differently */
6615 /* FIXME: Make it use the generic implementation */
6616 /* the backend code will deal with aot vs normal case */
6617 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6618 ins->sreg1 = src1->dreg;
6619 ins->inst_p0 = table;
6620 ins->inst_many_bb = targets;
6621 ins->klass = GUINT_TO_POINTER (n);
6622 MONO_ADD_INS (cfg->cbb, ins);
6624 if (sizeof (gpointer) == 8)
6625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6629 #if SIZEOF_VOID_P == 8
6630 /* The upper word might not be zero, and we add it to a 64 bit address later */
6631 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6634 if (cfg->compile_aot) {
6635 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6637 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6638 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6639 ins->inst_p0 = table;
6640 ins->dreg = table_reg;
6641 MONO_ADD_INS (cfg->cbb, ins);
6644 /* FIXME: Use load_memindex */
6645 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6647 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6649 start_new_bblock = 1;
6650 inline_costs += (BRANCH_COST * 2);
6670 dreg = alloc_freg (cfg);
6673 dreg = alloc_lreg (cfg);
6676 dreg = alloc_preg (cfg);
6679 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6680 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6681 ins->flags |= ins_flag;
6683 MONO_ADD_INS (bblock, ins);
6698 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6699 ins->flags |= ins_flag;
6701 MONO_ADD_INS (bblock, ins);
6709 MONO_INST_NEW (cfg, ins, (*ip));
6711 ins->sreg1 = sp [0]->dreg;
6712 ins->sreg2 = sp [1]->dreg;
6713 type_from_op (ins, sp [0], sp [1]);
6715 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6717 /* Use the immediate opcodes if possible */
6718 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6719 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6720 if (imm_opcode != -1) {
6721 ins->opcode = imm_opcode;
6722 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6725 sp [1]->opcode = OP_NOP;
6729 MONO_ADD_INS ((cfg)->cbb, (ins));
6732 mono_decompose_opcode (cfg, ins);
6749 MONO_INST_NEW (cfg, ins, (*ip));
6751 ins->sreg1 = sp [0]->dreg;
6752 ins->sreg2 = sp [1]->dreg;
6753 type_from_op (ins, sp [0], sp [1]);
6755 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6756 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6758 /* FIXME: Pass opcode to is_inst_imm */
6760 /* Use the immediate opcodes if possible */
6761 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6764 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6765 if (imm_opcode != -1) {
6766 ins->opcode = imm_opcode;
6767 if (sp [1]->opcode == OP_I8CONST) {
6768 #if SIZEOF_VOID_P == 8
6769 ins->inst_imm = sp [1]->inst_l;
6771 ins->inst_ls_word = sp [1]->inst_ls_word;
6772 ins->inst_ms_word = sp [1]->inst_ms_word;
6776 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6779 sp [1]->opcode = OP_NOP;
6782 MONO_ADD_INS ((cfg)->cbb, (ins));
6785 mono_decompose_opcode (cfg, ins);
6798 case CEE_CONV_OVF_I8:
6799 case CEE_CONV_OVF_U8:
6803 /* Special case this earlier so we have long constants in the IR */
6804 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6805 int data = sp [-1]->inst_c0;
6806 sp [-1]->opcode = OP_I8CONST;
6807 sp [-1]->type = STACK_I8;
6808 #if SIZEOF_VOID_P == 8
6809 if ((*ip) == CEE_CONV_U8)
6810 sp [-1]->inst_c0 = (guint32)data;
6812 sp [-1]->inst_c0 = data;
6814 sp [-1]->inst_ls_word = data;
6815 if ((*ip) == CEE_CONV_U8)
6816 sp [-1]->inst_ms_word = 0;
6818 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6820 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6827 case CEE_CONV_OVF_I4:
6828 case CEE_CONV_OVF_I1:
6829 case CEE_CONV_OVF_I2:
6830 case CEE_CONV_OVF_I:
6831 case CEE_CONV_OVF_U:
6834 if (sp [-1]->type == STACK_R8) {
6835 ADD_UNOP (CEE_CONV_OVF_I8);
6842 case CEE_CONV_OVF_U1:
6843 case CEE_CONV_OVF_U2:
6844 case CEE_CONV_OVF_U4:
6847 if (sp [-1]->type == STACK_R8) {
6848 ADD_UNOP (CEE_CONV_OVF_U8);
6855 case CEE_CONV_OVF_I1_UN:
6856 case CEE_CONV_OVF_I2_UN:
6857 case CEE_CONV_OVF_I4_UN:
6858 case CEE_CONV_OVF_I8_UN:
6859 case CEE_CONV_OVF_U1_UN:
6860 case CEE_CONV_OVF_U2_UN:
6861 case CEE_CONV_OVF_U4_UN:
6862 case CEE_CONV_OVF_U8_UN:
6863 case CEE_CONV_OVF_I_UN:
6864 case CEE_CONV_OVF_U_UN:
6874 case CEE_ADD_OVF_UN:
6876 case CEE_MUL_OVF_UN:
6878 case CEE_SUB_OVF_UN:
6886 token = read32 (ip + 1);
6887 klass = mini_get_class (method, token, generic_context);
6888 CHECK_TYPELOAD (klass);
6890 if (generic_class_is_reference_type (cfg, klass)) {
6891 MonoInst *store, *load;
6892 int dreg = alloc_preg (cfg);
6894 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6895 load->flags |= ins_flag;
6896 MONO_ADD_INS (cfg->cbb, load);
6898 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6899 store->flags |= ins_flag;
6900 MONO_ADD_INS (cfg->cbb, store);
6902 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6914 token = read32 (ip + 1);
6915 klass = mini_get_class (method, token, generic_context);
6916 CHECK_TYPELOAD (klass);
6918 /* Optimize the common ldobj+stloc combination */
6928 loc_index = ip [5] - CEE_STLOC_0;
6935 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6936 CHECK_LOCAL (loc_index);
6938 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6939 ins->dreg = cfg->locals [loc_index]->dreg;
6945 /* Optimize the ldobj+stobj combination */
6946 /* The reference case ends up being a load+store anyway */
6947 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6952 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6959 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6968 CHECK_STACK_OVF (1);
6970 n = read32 (ip + 1);
6972 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6973 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6974 ins->type = STACK_OBJ;
6977 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6978 MonoInst *iargs [1];
6980 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6981 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6983 if (cfg->opt & MONO_OPT_SHARED) {
6984 MonoInst *iargs [3];
6986 if (cfg->compile_aot) {
6987 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6989 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6990 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6991 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6992 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6993 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6995 if (bblock->out_of_line) {
6996 MonoInst *iargs [2];
6998 if (cfg->method->klass->image == mono_defaults.corlib) {
7000 * Avoid relocations in AOT and save some space by using a
7001 * version of helper_ldstr specialized to mscorlib.
7003 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7004 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7006 /* Avoid creating the string object */
7007 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7008 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7009 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7013 if (cfg->compile_aot) {
7014 NEW_LDSTRCONST (cfg, ins, image, n);
7016 MONO_ADD_INS (bblock, ins);
7019 NEW_PCONST (cfg, ins, NULL);
7020 ins->type = STACK_OBJ;
7021 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7023 MONO_ADD_INS (bblock, ins);
7032 MonoInst *iargs [2];
7033 MonoMethodSignature *fsig;
7036 MonoInst *vtable_arg = NULL;
7039 token = read32 (ip + 1);
7040 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7043 fsig = mono_method_get_signature (cmethod, image, token);
7045 mono_save_token_info (cfg, image, token, cmethod);
7047 if (!mono_class_init (cmethod->klass))
7050 if (cfg->generic_sharing_context)
7051 context_used = mono_method_check_context_used (cmethod);
7053 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7054 if (check_linkdemand (cfg, method, cmethod))
7056 CHECK_CFG_EXCEPTION;
7057 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7058 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7061 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7062 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7063 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7065 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7066 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7068 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7072 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7073 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7075 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7077 CHECK_TYPELOAD (cmethod->klass);
7078 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7083 n = fsig->param_count;
7087 * Generate smaller code for the common newobj <exception> instruction in
7088 * argument checking code.
7090 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7091 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7092 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7093 MonoInst *iargs [3];
7095 g_assert (!vtable_arg);
7099 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7102 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7106 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7111 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7114 g_assert_not_reached ();
7122 /* move the args to allow room for 'this' in the first position */
7128 /* check_call_signature () requires sp[0] to be set */
7129 this_ins.type = STACK_OBJ;
7131 if (check_call_signature (cfg, fsig, sp))
7136 if (mini_class_is_system_array (cmethod->klass)) {
7138 GENERIC_SHARING_FAILURE (*ip);
7139 g_assert (!context_used);
7140 g_assert (!vtable_arg);
7141 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7143 /* Avoid varargs in the common case */
7144 if (fsig->param_count == 1)
7145 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7146 else if (fsig->param_count == 2)
7147 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7149 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7150 } else if (cmethod->string_ctor) {
7151 g_assert (!context_used);
7152 g_assert (!vtable_arg);
7153 /* we simply pass a null pointer */
7154 EMIT_NEW_PCONST (cfg, *sp, NULL);
7155 /* now call the string ctor */
7156 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7158 MonoInst* callvirt_this_arg = NULL;
7160 if (cmethod->klass->valuetype) {
7161 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7162 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7163 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7168 * The code generated by mini_emit_virtual_call () expects
7169 * iargs [0] to be a boxed instance, but luckily the vcall
7170 * will be transformed into a normal call there.
7172 } else if (context_used) {
7176 if (cfg->opt & MONO_OPT_SHARED)
7177 rgctx_info = MONO_RGCTX_INFO_KLASS;
7179 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7180 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7182 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7185 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7187 CHECK_TYPELOAD (cmethod->klass);
7190 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7191 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7192 * As a workaround, we call class cctors before allocating objects.
7194 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7195 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7196 if (cfg->verbose_level > 2)
7197 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7198 class_inits = g_slist_prepend (class_inits, vtable);
7201 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7206 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7208 /* Now call the actual ctor */
7209 /* Avoid virtual calls to ctors if possible */
7210 if (cmethod->klass->marshalbyref)
7211 callvirt_this_arg = sp [0];
7213 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7214 mono_method_check_inlining (cfg, cmethod) &&
7215 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7216 !g_list_find (dont_inline, cmethod)) {
7219 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7220 cfg->real_offset += 5;
7223 inline_costs += costs - 5;
7226 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7228 } else if (context_used &&
7229 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7230 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7231 MonoInst *cmethod_addr;
7233 g_assert (!callvirt_this_arg);
7235 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7236 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7238 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7241 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7242 callvirt_this_arg, NULL, vtable_arg);
7246 if (alloc == NULL) {
7248 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7249 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7263 token = read32 (ip + 1);
7264 klass = mini_get_class (method, token, generic_context);
7265 CHECK_TYPELOAD (klass);
7266 if (sp [0]->type != STACK_OBJ)
7269 if (cfg->generic_sharing_context)
7270 context_used = mono_class_check_context_used (klass);
7279 args [1] = emit_get_rgctx_klass (cfg, context_used,
7280 klass, MONO_RGCTX_INFO_KLASS);
7282 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7286 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7287 MonoMethod *mono_castclass;
7288 MonoInst *iargs [1];
7291 mono_castclass = mono_marshal_get_castclass (klass);
7294 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7295 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7296 g_assert (costs > 0);
7299 cfg->real_offset += 5;
7304 inline_costs += costs;
7307 ins = handle_castclass (cfg, klass, *sp);
7317 token = read32 (ip + 1);
7318 klass = mini_get_class (method, token, generic_context);
7319 CHECK_TYPELOAD (klass);
7320 if (sp [0]->type != STACK_OBJ)
7323 if (cfg->generic_sharing_context)
7324 context_used = mono_class_check_context_used (klass);
7333 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7335 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7339 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7340 MonoMethod *mono_isinst;
7341 MonoInst *iargs [1];
7344 mono_isinst = mono_marshal_get_isinst (klass);
7347 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7348 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7349 g_assert (costs > 0);
7352 cfg->real_offset += 5;
7357 inline_costs += costs;
7360 ins = handle_isinst (cfg, klass, *sp);
7367 case CEE_UNBOX_ANY: {
7371 token = read32 (ip + 1);
7372 klass = mini_get_class (method, token, generic_context);
7373 CHECK_TYPELOAD (klass);
7375 mono_save_token_info (cfg, image, token, klass);
7377 if (cfg->generic_sharing_context)
7378 context_used = mono_class_check_context_used (klass);
7380 if (generic_class_is_reference_type (cfg, klass)) {
7383 MonoInst *iargs [2];
7388 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7389 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7393 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7394 MonoMethod *mono_castclass;
7395 MonoInst *iargs [1];
7398 mono_castclass = mono_marshal_get_castclass (klass);
7401 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7402 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7404 g_assert (costs > 0);
7407 cfg->real_offset += 5;
7411 inline_costs += costs;
7413 ins = handle_castclass (cfg, klass, *sp);
7421 if (mono_class_is_nullable (klass)) {
7422 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7429 ins = handle_unbox (cfg, klass, sp, context_used);
7435 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7448 token = read32 (ip + 1);
7449 klass = mini_get_class (method, token, generic_context);
7450 CHECK_TYPELOAD (klass);
7452 mono_save_token_info (cfg, image, token, klass);
7454 if (cfg->generic_sharing_context)
7455 context_used = mono_class_check_context_used (klass);
7457 if (generic_class_is_reference_type (cfg, klass)) {
7463 if (klass == mono_defaults.void_class)
7465 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7467 /* frequent check in generic code: box (struct), brtrue */
7468 if (!mono_class_is_nullable (klass) &&
7469 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7470 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7472 MONO_INST_NEW (cfg, ins, OP_BR);
7473 if (*ip == CEE_BRTRUE_S) {
7476 target = ip + 1 + (signed char)(*ip);
7481 target = ip + 4 + (gint)(read32 (ip));
7484 GET_BBLOCK (cfg, tblock, target);
7485 link_bblock (cfg, bblock, tblock);
7486 ins->inst_target_bb = tblock;
7487 GET_BBLOCK (cfg, tblock, ip);
7489 * This leads to some inconsistency, since the two bblocks are not
7490 * really connected, but it is needed for handling stack arguments
7491 * correct (See test_0_box_brtrue_opt_regress_81102).
7493 link_bblock (cfg, bblock, tblock);
7494 if (sp != stack_start) {
7495 handle_stack_args (cfg, stack_start, sp - stack_start);
7497 CHECK_UNVERIFIABLE (cfg);
7499 MONO_ADD_INS (bblock, ins);
7500 start_new_bblock = 1;
7508 if (cfg->opt & MONO_OPT_SHARED)
7509 rgctx_info = MONO_RGCTX_INFO_KLASS;
7511 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7512 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7513 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7515 *sp++ = handle_box (cfg, val, klass);
7526 token = read32 (ip + 1);
7527 klass = mini_get_class (method, token, generic_context);
7528 CHECK_TYPELOAD (klass);
7530 mono_save_token_info (cfg, image, token, klass);
7532 if (cfg->generic_sharing_context)
7533 context_used = mono_class_check_context_used (klass);
7535 if (mono_class_is_nullable (klass)) {
7538 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7539 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7543 ins = handle_unbox (cfg, klass, sp, context_used);
7553 MonoClassField *field;
7557 if (*ip == CEE_STFLD) {
7564 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7566 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7569 token = read32 (ip + 1);
7570 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7571 field = mono_method_get_wrapper_data (method, token);
7572 klass = field->parent;
7575 field = mono_field_from_token (image, token, &klass, generic_context);
7579 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7580 FIELD_ACCESS_FAILURE;
7581 mono_class_init (klass);
7583 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7584 if (*ip == CEE_STFLD) {
7585 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7587 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7588 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7589 MonoInst *iargs [5];
7592 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7593 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7594 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7598 if (cfg->opt & MONO_OPT_INLINE) {
7599 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7600 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7601 g_assert (costs > 0);
7604 cfg->real_offset += 5;
7607 inline_costs += costs;
7610 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7615 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7617 store->flags |= ins_flag;
7624 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7625 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7626 MonoInst *iargs [4];
7629 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7630 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7631 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7632 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7633 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7634 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7636 g_assert (costs > 0);
7639 cfg->real_offset += 5;
7643 inline_costs += costs;
7646 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7650 if (sp [0]->type == STACK_VTYPE) {
7653 /* Have to compute the address of the variable */
7655 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7657 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7659 g_assert (var->klass == klass);
7661 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7665 if (*ip == CEE_LDFLDA) {
7666 dreg = alloc_preg (cfg);
7668 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7669 ins->klass = mono_class_from_mono_type (field->type);
7670 ins->type = STACK_MP;
7675 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7676 load->flags |= ins_flag;
7687 MonoClassField *field;
7688 gpointer addr = NULL;
7689 gboolean is_special_static;
7692 token = read32 (ip + 1);
7694 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7695 field = mono_method_get_wrapper_data (method, token);
7696 klass = field->parent;
7699 field = mono_field_from_token (image, token, &klass, generic_context);
7702 mono_class_init (klass);
7703 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7704 FIELD_ACCESS_FAILURE;
7707 * We can only support shared generic static
7708 * field access on architectures where the
7709 * trampoline code has been extended to handle
7710 * the generic class init.
7712 #ifndef MONO_ARCH_VTABLE_REG
7713 GENERIC_SHARING_FAILURE (*ip);
7716 if (cfg->generic_sharing_context)
7717 context_used = mono_class_check_context_used (klass);
7719 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7721 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7722 * to be called here.
7724 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7725 mono_class_vtable (cfg->domain, klass);
7726 CHECK_TYPELOAD (klass);
7728 mono_domain_lock (cfg->domain);
7729 if (cfg->domain->special_static_fields)
7730 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7731 mono_domain_unlock (cfg->domain);
7733 is_special_static = mono_class_field_is_special_static (field);
7735 /* Generate IR to compute the field address */
7737 if ((cfg->opt & MONO_OPT_SHARED) ||
7738 (cfg->compile_aot && is_special_static) ||
7739 (context_used && is_special_static)) {
7740 MonoInst *iargs [2];
7742 g_assert (field->parent);
7743 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7745 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7746 field, MONO_RGCTX_INFO_CLASS_FIELD);
7748 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7750 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7751 } else if (context_used) {
7752 MonoInst *static_data;
7755 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7756 method->klass->name_space, method->klass->name, method->name,
7757 depth, field->offset);
7760 if (mono_class_needs_cctor_run (klass, method)) {
7764 vtable = emit_get_rgctx_klass (cfg, context_used,
7765 klass, MONO_RGCTX_INFO_VTABLE);
7767 // FIXME: This doesn't work since it tries to pass the argument
7768 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7770 * The vtable pointer is always passed in a register regardless of
7771 * the calling convention, so assign it manually, and make a call
7772 * using a signature without parameters.
7774 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7775 #ifdef MONO_ARCH_VTABLE_REG
7776 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7777 cfg->uses_vtable_reg = TRUE;
7784 * The pointer we're computing here is
7786 * super_info.static_data + field->offset
7788 static_data = emit_get_rgctx_klass (cfg, context_used,
7789 klass, MONO_RGCTX_INFO_STATIC_DATA);
7791 if (field->offset == 0) {
7794 int addr_reg = mono_alloc_preg (cfg);
7795 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7797 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7798 MonoInst *iargs [2];
7800 g_assert (field->parent);
7801 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7802 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7803 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7805 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7807 CHECK_TYPELOAD (klass);
7809 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7810 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7811 if (cfg->verbose_level > 2)
7812 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7813 class_inits = g_slist_prepend (class_inits, vtable);
7815 if (cfg->run_cctors) {
7817 /* This makes so that inline cannot trigger */
7818 /* .cctors: too many apps depend on them */
7819 /* running with a specific order... */
7820 if (! vtable->initialized)
7822 ex = mono_runtime_class_init_full (vtable, FALSE);
7824 set_exception_object (cfg, ex);
7825 goto exception_exit;
7829 addr = (char*)vtable->data + field->offset;
7831 if (cfg->compile_aot)
7832 EMIT_NEW_SFLDACONST (cfg, ins, field);
7834 EMIT_NEW_PCONST (cfg, ins, addr);
7837 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7838 * This could be later optimized to do just a couple of
7839 * memory dereferences with constant offsets.
7841 MonoInst *iargs [1];
7842 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7843 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7847 /* Generate IR to do the actual load/store operation */
7849 if (*ip == CEE_LDSFLDA) {
7850 ins->klass = mono_class_from_mono_type (field->type);
7852 } else if (*ip == CEE_STSFLD) {
7857 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7858 store->flags |= ins_flag;
7861 gboolean is_const = FALSE;
7862 MonoVTable *vtable = NULL;
7864 if (!context_used) {
7865 vtable = mono_class_vtable (cfg->domain, klass);
7866 CHECK_TYPELOAD (klass);
7868 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7869 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7870 gpointer addr = (char*)vtable->data + field->offset;
7871 int ro_type = field->type->type;
7872 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7873 ro_type = field->type->data.klass->enum_basetype->type;
7875 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7878 case MONO_TYPE_BOOLEAN:
7880 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7884 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7887 case MONO_TYPE_CHAR:
7889 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7893 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7898 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7902 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7907 case MONO_TYPE_STRING:
7908 case MONO_TYPE_OBJECT:
7909 case MONO_TYPE_CLASS:
7910 case MONO_TYPE_SZARRAY:
7912 case MONO_TYPE_FNPTR:
7913 case MONO_TYPE_ARRAY:
7914 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7915 type_to_eval_stack_type ((cfg), field->type, *sp);
7920 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7925 case MONO_TYPE_VALUETYPE:
7935 CHECK_STACK_OVF (1);
7937 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7938 load->flags |= ins_flag;
7950 token = read32 (ip + 1);
7951 klass = mini_get_class (method, token, generic_context);
7952 CHECK_TYPELOAD (klass);
7953 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7954 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7965 const char *data_ptr;
7972 token = read32 (ip + 1);
7974 klass = mini_get_class (method, token, generic_context);
7975 CHECK_TYPELOAD (klass);
7977 if (cfg->generic_sharing_context)
7978 context_used = mono_class_check_context_used (klass);
7983 /* FIXME: Decompose later to help abcrem */
7986 args [0] = emit_get_rgctx_klass (cfg, context_used,
7987 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7992 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7994 if (cfg->opt & MONO_OPT_SHARED) {
7995 /* Decompose now to avoid problems with references to the domainvar */
7996 MonoInst *iargs [3];
7998 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7999 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8002 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8004 /* Decompose later since it is needed by abcrem */
8005 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8006 ins->dreg = alloc_preg (cfg);
8007 ins->sreg1 = sp [0]->dreg;
8008 ins->inst_newa_class = klass;
8009 ins->type = STACK_OBJ;
8011 MONO_ADD_INS (cfg->cbb, ins);
8012 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8013 cfg->cbb->has_array_access = TRUE;
8015 /* Needed so mono_emit_load_get_addr () gets called */
8016 mono_get_got_var (cfg);
8026 * we inline/optimize the initialization sequence if possible.
8027 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8028 * for small sizes open code the memcpy
8029 * ensure the rva field is big enough
8031 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
8032 MonoMethod *memcpy_method = get_memcpy_method ();
8033 MonoInst *iargs [3];
8034 int add_reg = alloc_preg (cfg);
8036 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8037 if (cfg->compile_aot) {
8038 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
8040 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8042 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8043 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8052 if (sp [0]->type != STACK_OBJ)
8055 dreg = alloc_preg (cfg);
8056 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8057 ins->dreg = alloc_preg (cfg);
8058 ins->sreg1 = sp [0]->dreg;
8059 ins->type = STACK_I4;
8060 MONO_ADD_INS (cfg->cbb, ins);
8061 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8062 cfg->cbb->has_array_access = TRUE;
8070 if (sp [0]->type != STACK_OBJ)
8073 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8075 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8076 CHECK_TYPELOAD (klass);
8077 /* we need to make sure that this array is exactly the type it needs
8078 * to be for correctness. the wrappers are lax with their usage
8079 * so we need to ignore them here
8081 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8082 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8085 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8089 case CEE_LDELEM_ANY:
8100 case CEE_LDELEM_REF: {
8106 if (*ip == CEE_LDELEM_ANY) {
8108 token = read32 (ip + 1);
8109 klass = mini_get_class (method, token, generic_context);
8110 CHECK_TYPELOAD (klass);
8111 mono_class_init (klass);
8114 klass = array_access_to_klass (*ip);
8116 if (sp [0]->type != STACK_OBJ)
8119 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8121 if (sp [1]->opcode == OP_ICONST) {
8122 int array_reg = sp [0]->dreg;
8123 int index_reg = sp [1]->dreg;
8124 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8126 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8127 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8129 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8133 if (*ip == CEE_LDELEM_ANY)
8146 case CEE_STELEM_REF:
8147 case CEE_STELEM_ANY: {
8153 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8155 if (*ip == CEE_STELEM_ANY) {
8157 token = read32 (ip + 1);
8158 klass = mini_get_class (method, token, generic_context);
8159 CHECK_TYPELOAD (klass);
8160 mono_class_init (klass);
8163 klass = array_access_to_klass (*ip);
8165 if (sp [0]->type != STACK_OBJ)
8168 /* storing a NULL doesn't need any of the complex checks in stelemref */
8169 if (generic_class_is_reference_type (cfg, klass) &&
8170 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8171 MonoMethod* helper = mono_marshal_get_stelemref ();
8172 MonoInst *iargs [3];
8174 if (sp [0]->type != STACK_OBJ)
8176 if (sp [2]->type != STACK_OBJ)
8183 mono_emit_method_call (cfg, helper, iargs, NULL);
8185 if (sp [1]->opcode == OP_ICONST) {
8186 int array_reg = sp [0]->dreg;
8187 int index_reg = sp [1]->dreg;
8188 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8190 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8191 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8193 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8194 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8198 if (*ip == CEE_STELEM_ANY)
8205 case CEE_CKFINITE: {
8209 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8210 ins->sreg1 = sp [0]->dreg;
8211 ins->dreg = alloc_freg (cfg);
8212 ins->type = STACK_R8;
8213 MONO_ADD_INS (bblock, ins);
8216 mono_decompose_opcode (cfg, ins);
8221 case CEE_REFANYVAL: {
8222 MonoInst *src_var, *src;
8224 int klass_reg = alloc_preg (cfg);
8225 int dreg = alloc_preg (cfg);
8228 MONO_INST_NEW (cfg, ins, *ip);
8231 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8232 CHECK_TYPELOAD (klass);
8233 mono_class_init (klass);
8235 if (cfg->generic_sharing_context)
8236 context_used = mono_class_check_context_used (klass);
8239 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8241 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8242 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8246 MonoInst *klass_ins;
8248 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8249 klass, MONO_RGCTX_INFO_KLASS);
8252 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8253 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8255 mini_emit_class_check (cfg, klass_reg, klass);
8257 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8258 ins->type = STACK_MP;
8263 case CEE_MKREFANY: {
8264 MonoInst *loc, *addr;
8267 MONO_INST_NEW (cfg, ins, *ip);
8270 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8271 CHECK_TYPELOAD (klass);
8272 mono_class_init (klass);
8274 if (cfg->generic_sharing_context)
8275 context_used = mono_class_check_context_used (klass);
8277 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8278 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8281 MonoInst *const_ins;
8282 int type_reg = alloc_preg (cfg);
8284 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8285 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8287 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8288 } else if (cfg->compile_aot) {
8289 int const_reg = alloc_preg (cfg);
8290 int type_reg = alloc_preg (cfg);
8292 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8293 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8297 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8298 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8300 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8302 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8303 ins->type = STACK_VTYPE;
8304 ins->klass = mono_defaults.typed_reference_class;
8311 MonoClass *handle_class;
8313 CHECK_STACK_OVF (1);
8316 n = read32 (ip + 1);
8318 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8319 handle = mono_method_get_wrapper_data (method, n);
8320 handle_class = mono_method_get_wrapper_data (method, n + 1);
8321 if (handle_class == mono_defaults.typehandle_class)
8322 handle = &((MonoClass*)handle)->byval_arg;
8325 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8329 mono_class_init (handle_class);
8330 if (cfg->generic_sharing_context) {
8331 if (handle_class == mono_defaults.typehandle_class) {
8332 /* If we get a MONO_TYPE_CLASS
8333 then we need to provide the
8335 instantiation of it. */
8336 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8339 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8340 } else if (handle_class == mono_defaults.fieldhandle_class)
8341 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8342 else if (handle_class == mono_defaults.methodhandle_class)
8343 context_used = mono_method_check_context_used (handle);
8345 g_assert_not_reached ();
8348 if (cfg->opt & MONO_OPT_SHARED) {
8349 MonoInst *addr, *vtvar, *iargs [3];
8350 int method_context_used;
8352 if (cfg->generic_sharing_context)
8353 method_context_used = mono_method_check_context_used (method);
8355 method_context_used = 0;
8357 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8359 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8360 EMIT_NEW_ICONST (cfg, iargs [1], n);
8361 if (method_context_used) {
8362 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8363 method, MONO_RGCTX_INFO_METHOD);
8364 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8366 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8367 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8369 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8373 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8375 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8376 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8377 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8378 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8379 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8380 MonoClass *tclass = mono_class_from_mono_type (handle);
8382 mono_class_init (tclass);
8384 ins = emit_get_rgctx_klass (cfg, context_used,
8385 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8386 } else if (cfg->compile_aot) {
8387 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8389 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8391 ins->type = STACK_OBJ;
8392 ins->klass = cmethod->klass;
8395 MonoInst *addr, *vtvar;
8397 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8400 if (handle_class == mono_defaults.typehandle_class) {
8401 ins = emit_get_rgctx_klass (cfg, context_used,
8402 mono_class_from_mono_type (handle),
8403 MONO_RGCTX_INFO_TYPE);
8404 } else if (handle_class == mono_defaults.methodhandle_class) {
8405 ins = emit_get_rgctx_method (cfg, context_used,
8406 handle, MONO_RGCTX_INFO_METHOD);
8407 } else if (handle_class == mono_defaults.fieldhandle_class) {
8408 ins = emit_get_rgctx_field (cfg, context_used,
8409 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8411 g_assert_not_reached ();
8413 } else if (cfg->compile_aot) {
8414 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8416 EMIT_NEW_PCONST (cfg, ins, handle);
8418 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8419 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8420 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8430 MONO_INST_NEW (cfg, ins, OP_THROW);
8432 ins->sreg1 = sp [0]->dreg;
8434 bblock->out_of_line = TRUE;
8435 MONO_ADD_INS (bblock, ins);
8436 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8437 MONO_ADD_INS (bblock, ins);
8440 link_bblock (cfg, bblock, end_bblock);
8441 start_new_bblock = 1;
8443 case CEE_ENDFINALLY:
8444 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8445 MONO_ADD_INS (bblock, ins);
8447 start_new_bblock = 1;
8450 * Control will leave the method so empty the stack, otherwise
8451 * the next basic block will start with a nonempty stack.
8453 while (sp != stack_start) {
8461 if (*ip == CEE_LEAVE) {
8463 target = ip + 5 + (gint32)read32(ip + 1);
8466 target = ip + 2 + (signed char)(ip [1]);
8469 /* empty the stack */
8470 while (sp != stack_start) {
8475 * If this leave statement is in a catch block, check for a
8476 * pending exception, and rethrow it if necessary.
8478 for (i = 0; i < header->num_clauses; ++i) {
8479 MonoExceptionClause *clause = &header->clauses [i];
8482 * Use <= in the final comparison to handle clauses with multiple
8483 * leave statements, like in bug #78024.
8484 * The ordering of the exception clauses guarantees that we find the
8487 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8489 MonoBasicBlock *dont_throw;
8494 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8497 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8499 NEW_BBLOCK (cfg, dont_throw);
8502 * Currently, we allways rethrow the abort exception, despite the
8503 * fact that this is not correct. See thread6.cs for an example.
8504 * But propagating the abort exception is more important than
8505 * getting the sematics right.
8507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8509 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8511 MONO_START_BB (cfg, dont_throw);
8516 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8518 for (tmp = handlers; tmp; tmp = tmp->next) {
8520 link_bblock (cfg, bblock, tblock);
8521 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8522 ins->inst_target_bb = tblock;
8523 MONO_ADD_INS (bblock, ins);
8525 g_list_free (handlers);
8528 MONO_INST_NEW (cfg, ins, OP_BR);
8529 MONO_ADD_INS (bblock, ins);
8530 GET_BBLOCK (cfg, tblock, target);
8531 link_bblock (cfg, bblock, tblock);
8532 ins->inst_target_bb = tblock;
8533 start_new_bblock = 1;
8535 if (*ip == CEE_LEAVE)
8544 * Mono specific opcodes
8546 case MONO_CUSTOM_PREFIX: {
8548 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8552 case CEE_MONO_ICALL: {
8554 MonoJitICallInfo *info;
8556 token = read32 (ip + 2);
8557 func = mono_method_get_wrapper_data (method, token);
8558 info = mono_find_jit_icall_by_addr (func);
8561 CHECK_STACK (info->sig->param_count);
8562 sp -= info->sig->param_count;
8564 ins = mono_emit_jit_icall (cfg, info->func, sp);
8565 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8569 inline_costs += 10 * num_calls++;
8573 case CEE_MONO_LDPTR: {
8576 CHECK_STACK_OVF (1);
8578 token = read32 (ip + 2);
8580 ptr = mono_method_get_wrapper_data (method, token);
8581 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8582 MonoJitICallInfo *callinfo;
8583 const char *icall_name;
8585 icall_name = method->name + strlen ("__icall_wrapper_");
8586 g_assert (icall_name);
8587 callinfo = mono_find_jit_icall_by_name (icall_name);
8588 g_assert (callinfo);
8590 if (ptr == callinfo->func) {
8591 /* Will be transformed into an AOTCONST later */
8592 EMIT_NEW_PCONST (cfg, ins, ptr);
8598 /* FIXME: Generalize this */
8599 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8600 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8605 EMIT_NEW_PCONST (cfg, ins, ptr);
8608 inline_costs += 10 * num_calls++;
8609 /* Can't embed random pointers into AOT code */
8610 cfg->disable_aot = 1;
8613 case CEE_MONO_ICALL_ADDR: {
8614 MonoMethod *cmethod;
8617 CHECK_STACK_OVF (1);
8619 token = read32 (ip + 2);
8621 cmethod = mono_method_get_wrapper_data (method, token);
8623 if (cfg->compile_aot) {
8624 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8626 ptr = mono_lookup_internal_call (cmethod);
8628 EMIT_NEW_PCONST (cfg, ins, ptr);
8634 case CEE_MONO_VTADDR: {
8635 MonoInst *src_var, *src;
8641 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8642 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8647 case CEE_MONO_NEWOBJ: {
8648 MonoInst *iargs [2];
8650 CHECK_STACK_OVF (1);
8652 token = read32 (ip + 2);
8653 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8654 mono_class_init (klass);
8655 NEW_DOMAINCONST (cfg, iargs [0]);
8656 MONO_ADD_INS (cfg->cbb, iargs [0]);
8657 NEW_CLASSCONST (cfg, iargs [1], klass);
8658 MONO_ADD_INS (cfg->cbb, iargs [1]);
8659 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8661 inline_costs += 10 * num_calls++;
8664 case CEE_MONO_OBJADDR:
8667 MONO_INST_NEW (cfg, ins, OP_MOVE);
8668 ins->dreg = alloc_preg (cfg);
8669 ins->sreg1 = sp [0]->dreg;
8670 ins->type = STACK_MP;
8671 MONO_ADD_INS (cfg->cbb, ins);
8675 case CEE_MONO_LDNATIVEOBJ:
8677 * Similar to LDOBJ, but instead load the unmanaged
8678 * representation of the vtype to the stack.
8683 token = read32 (ip + 2);
8684 klass = mono_method_get_wrapper_data (method, token);
8685 g_assert (klass->valuetype);
8686 mono_class_init (klass);
8689 MonoInst *src, *dest, *temp;
8692 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8693 temp->backend.is_pinvoke = 1;
8694 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8695 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8697 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8698 dest->type = STACK_VTYPE;
8699 dest->klass = klass;
8705 case CEE_MONO_RETOBJ: {
8707 * Same as RET, but return the native representation of a vtype
8710 g_assert (cfg->ret);
8711 g_assert (mono_method_signature (method)->pinvoke);
8716 token = read32 (ip + 2);
8717 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8719 if (!cfg->vret_addr) {
8720 g_assert (cfg->ret_var_is_local);
8722 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8724 EMIT_NEW_RETLOADA (cfg, ins);
8726 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8728 if (sp != stack_start)
8731 MONO_INST_NEW (cfg, ins, OP_BR);
8732 ins->inst_target_bb = end_bblock;
8733 MONO_ADD_INS (bblock, ins);
8734 link_bblock (cfg, bblock, end_bblock);
8735 start_new_bblock = 1;
8739 case CEE_MONO_CISINST:
8740 case CEE_MONO_CCASTCLASS: {
8745 token = read32 (ip + 2);
8746 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8747 if (ip [1] == CEE_MONO_CISINST)
8748 ins = handle_cisinst (cfg, klass, sp [0]);
8750 ins = handle_ccastclass (cfg, klass, sp [0]);
8756 case CEE_MONO_SAVE_LMF:
8757 case CEE_MONO_RESTORE_LMF:
8758 #ifdef MONO_ARCH_HAVE_LMF_OPS
8759 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8760 MONO_ADD_INS (bblock, ins);
8761 cfg->need_lmf_area = TRUE;
8765 case CEE_MONO_CLASSCONST:
8766 CHECK_STACK_OVF (1);
8768 token = read32 (ip + 2);
8769 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8772 inline_costs += 10 * num_calls++;
8774 case CEE_MONO_NOT_TAKEN:
8775 bblock->out_of_line = TRUE;
8779 CHECK_STACK_OVF (1);
8781 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8782 ins->dreg = alloc_preg (cfg);
8783 ins->inst_offset = (gint32)read32 (ip + 2);
8784 ins->type = STACK_PTR;
8785 MONO_ADD_INS (bblock, ins);
8790 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8800 /* somewhat similar to LDTOKEN */
8801 MonoInst *addr, *vtvar;
8802 CHECK_STACK_OVF (1);
8803 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8805 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8806 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8808 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8809 ins->type = STACK_VTYPE;
8810 ins->klass = mono_defaults.argumenthandle_class;
8823 * The following transforms:
8824 * CEE_CEQ into OP_CEQ
8825 * CEE_CGT into OP_CGT
8826 * CEE_CGT_UN into OP_CGT_UN
8827 * CEE_CLT into OP_CLT
8828 * CEE_CLT_UN into OP_CLT_UN
8830 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8832 MONO_INST_NEW (cfg, ins, cmp->opcode);
8834 cmp->sreg1 = sp [0]->dreg;
8835 cmp->sreg2 = sp [1]->dreg;
8836 type_from_op (cmp, sp [0], sp [1]);
8838 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8839 cmp->opcode = OP_LCOMPARE;
8840 else if (sp [0]->type == STACK_R8)
8841 cmp->opcode = OP_FCOMPARE;
8843 cmp->opcode = OP_ICOMPARE;
8844 MONO_ADD_INS (bblock, cmp);
8845 ins->type = STACK_I4;
8846 ins->dreg = alloc_dreg (cfg, ins->type);
8847 type_from_op (ins, sp [0], sp [1]);
8849 if (cmp->opcode == OP_FCOMPARE) {
8851 * The backends expect the fceq opcodes to do the
8854 cmp->opcode = OP_NOP;
8855 ins->sreg1 = cmp->sreg1;
8856 ins->sreg2 = cmp->sreg2;
8858 MONO_ADD_INS (bblock, ins);
8865 MonoMethod *cil_method, *ctor_method;
8866 gboolean needs_static_rgctx_invoke;
8868 CHECK_STACK_OVF (1);
8870 n = read32 (ip + 2);
8871 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8874 mono_class_init (cmethod->klass);
8876 mono_save_token_info (cfg, image, n, cmethod);
8878 if (cfg->generic_sharing_context)
8879 context_used = mono_method_check_context_used (cmethod);
8881 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8883 cil_method = cmethod;
8884 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8885 METHOD_ACCESS_FAILURE;
8887 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8888 if (check_linkdemand (cfg, method, cmethod))
8890 CHECK_CFG_EXCEPTION;
8891 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8892 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8896 * Optimize the common case of ldftn+delegate creation
8898 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8899 /* FIXME: SGEN support */
8900 /* FIXME: handle shared static generic methods */
8901 /* FIXME: handle this in shared code */
8902 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8903 MonoInst *target_ins;
8906 if (cfg->verbose_level > 3)
8907 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8908 target_ins = sp [-1];
8910 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8918 if (needs_static_rgctx_invoke)
8919 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8921 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8922 } else if (needs_static_rgctx_invoke) {
8923 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8925 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8927 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8931 inline_costs += 10 * num_calls++;
8934 case CEE_LDVIRTFTN: {
8939 n = read32 (ip + 2);
8940 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8943 mono_class_init (cmethod->klass);
8945 if (cfg->generic_sharing_context)
8946 context_used = mono_method_check_context_used (cmethod);
8948 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8949 if (check_linkdemand (cfg, method, cmethod))
8951 CHECK_CFG_EXCEPTION;
8952 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8953 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8960 args [1] = emit_get_rgctx_method (cfg, context_used,
8961 cmethod, MONO_RGCTX_INFO_METHOD);
8962 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8964 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8965 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8969 inline_costs += 10 * num_calls++;
8973 CHECK_STACK_OVF (1);
8975 n = read16 (ip + 2);
8977 EMIT_NEW_ARGLOAD (cfg, ins, n);
8982 CHECK_STACK_OVF (1);
8984 n = read16 (ip + 2);
8986 NEW_ARGLOADA (cfg, ins, n);
8987 MONO_ADD_INS (cfg->cbb, ins);
8995 n = read16 (ip + 2);
8997 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8999 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9003 CHECK_STACK_OVF (1);
9005 n = read16 (ip + 2);
9007 EMIT_NEW_LOCLOAD (cfg, ins, n);
9012 unsigned char *tmp_ip;
9013 CHECK_STACK_OVF (1);
9015 n = read16 (ip + 2);
9018 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9024 EMIT_NEW_LOCLOADA (cfg, ins, n);
9033 n = read16 (ip + 2);
9035 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9037 emit_stloc_ir (cfg, sp, header, n);
9044 if (sp != stack_start)
9046 if (cfg->method != method)
9048 * Inlining this into a loop in a parent could lead to
9049 * stack overflows which is different behavior than the
9050 * non-inlined case, thus disable inlining in this case.
9052 goto inline_failure;
9054 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9055 ins->dreg = alloc_preg (cfg);
9056 ins->sreg1 = sp [0]->dreg;
9057 ins->type = STACK_PTR;
9058 MONO_ADD_INS (cfg->cbb, ins);
9060 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9061 if (header->init_locals)
9062 ins->flags |= MONO_INST_INIT;
9067 case CEE_ENDFILTER: {
9068 MonoExceptionClause *clause, *nearest;
9069 int cc, nearest_num;
9073 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9075 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9076 ins->sreg1 = (*sp)->dreg;
9077 MONO_ADD_INS (bblock, ins);
9078 start_new_bblock = 1;
9083 for (cc = 0; cc < header->num_clauses; ++cc) {
9084 clause = &header->clauses [cc];
9085 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9086 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9087 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9093 if ((ip - header->code) != nearest->handler_offset)
9098 case CEE_UNALIGNED_:
9099 ins_flag |= MONO_INST_UNALIGNED;
9100 /* FIXME: record alignment? we can assume 1 for now */
9105 ins_flag |= MONO_INST_VOLATILE;
9109 ins_flag |= MONO_INST_TAILCALL;
9110 cfg->flags |= MONO_CFG_HAS_TAIL;
9111 /* Can't inline tail calls at this time */
9112 inline_costs += 100000;
9119 token = read32 (ip + 2);
9120 klass = mini_get_class (method, token, generic_context);
9121 CHECK_TYPELOAD (klass);
9122 if (generic_class_is_reference_type (cfg, klass))
9123 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9125 mini_emit_initobj (cfg, *sp, NULL, klass);
9129 case CEE_CONSTRAINED_:
9131 token = read32 (ip + 2);
9132 constrained_call = mono_class_get_full (image, token, generic_context);
9133 CHECK_TYPELOAD (constrained_call);
9138 MonoInst *iargs [3];
9142 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9143 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9144 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9145 /* emit_memset only works when val == 0 */
9146 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9151 if (ip [1] == CEE_CPBLK) {
9152 MonoMethod *memcpy_method = get_memcpy_method ();
9153 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9155 MonoMethod *memset_method = get_memset_method ();
9156 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9166 ins_flag |= MONO_INST_NOTYPECHECK;
9168 ins_flag |= MONO_INST_NORANGECHECK;
9169 /* we ignore the no-nullcheck for now since we
9170 * really do it explicitly only when doing callvirt->call
9176 int handler_offset = -1;
9178 for (i = 0; i < header->num_clauses; ++i) {
9179 MonoExceptionClause *clause = &header->clauses [i];
9180 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9181 handler_offset = clause->handler_offset;
9186 bblock->flags |= BB_EXCEPTION_UNSAFE;
9188 g_assert (handler_offset != -1);
9190 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9191 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9192 ins->sreg1 = load->dreg;
9193 MONO_ADD_INS (bblock, ins);
9195 link_bblock (cfg, bblock, end_bblock);
9196 start_new_bblock = 1;
9204 CHECK_STACK_OVF (1);
9206 token = read32 (ip + 2);
9207 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9208 MonoType *type = mono_type_create_from_typespec (image, token);
9209 token = mono_type_size (type, &ialign);
9211 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9212 CHECK_TYPELOAD (klass);
9213 mono_class_init (klass);
9214 token = mono_class_value_size (klass, &align);
9216 EMIT_NEW_ICONST (cfg, ins, token);
9221 case CEE_REFANYTYPE: {
9222 MonoInst *src_var, *src;
9228 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9230 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9231 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9232 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9242 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9247 g_error ("opcode 0x%02x not handled", *ip);
9250 if (start_new_bblock != 1)
9253 bblock->cil_length = ip - bblock->cil_code;
9254 bblock->next_bb = end_bblock;
9256 if (cfg->method == method && cfg->domainvar) {
9258 MonoInst *get_domain;
9260 cfg->cbb = init_localsbb;
9262 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9263 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9266 get_domain->dreg = alloc_preg (cfg);
9267 MONO_ADD_INS (cfg->cbb, get_domain);
9269 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9270 MONO_ADD_INS (cfg->cbb, store);
9273 if (cfg->method == method && cfg->got_var)
9274 mono_emit_load_got_addr (cfg);
9276 if (header->init_locals) {
9279 cfg->cbb = init_localsbb;
9280 cfg->ip = header->code;
9281 for (i = 0; i < header->num_locals; ++i) {
9282 MonoType *ptype = header->locals [i];
9283 int t = ptype->type;
9284 dreg = cfg->locals [i]->dreg;
9286 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9287 t = ptype->data.klass->enum_basetype->type;
9289 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9290 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9291 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9292 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9293 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9294 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9295 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9296 ins->type = STACK_R8;
9297 ins->inst_p0 = (void*)&r8_0;
9298 ins->dreg = alloc_dreg (cfg, STACK_R8);
9299 MONO_ADD_INS (init_localsbb, ins);
9300 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9301 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9302 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9303 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9305 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9312 if (cfg->method == method) {
9314 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9315 bb->region = mono_find_block_region (cfg, bb->real_offset);
9317 mono_create_spvar_for_region (cfg, bb->region);
9318 if (cfg->verbose_level > 2)
9319 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9323 g_slist_free (class_inits);
9324 dont_inline = g_list_remove (dont_inline, method);
9326 if (inline_costs < 0) {
9329 /* Method is too large */
9330 mname = mono_method_full_name (method, TRUE);
9331 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9332 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9337 if ((cfg->verbose_level > 2) && (cfg->method == method))
9338 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9340 return inline_costs;
9343 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9344 g_slist_free (class_inits);
9345 dont_inline = g_list_remove (dont_inline, method);
9349 g_slist_free (class_inits);
9350 dont_inline = g_list_remove (dont_inline, method);
9354 g_slist_free (class_inits);
9355 dont_inline = g_list_remove (dont_inline, method);
9356 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9360 g_slist_free (class_inits);
9361 dont_inline = g_list_remove (dont_inline, method);
9362 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode onto the matching
 * store-immediate form, used when the value being stored is a constant.
 * Aborts (g_assert_not_reached) for opcodes with no immediate variant,
 * so callers must only pass the store opcodes handled below.
 */
9367 store_membase_reg_to_store_membase_imm (int opcode)
9370 	case OP_STORE_MEMBASE_REG:
9371 		return OP_STORE_MEMBASE_IMM;
9372 	case OP_STOREI1_MEMBASE_REG:
9373 		return OP_STOREI1_MEMBASE_IMM;
9374 	case OP_STOREI2_MEMBASE_REG:
9375 		return OP_STOREI2_MEMBASE_IMM;
9376 	case OP_STOREI4_MEMBASE_REG:
9377 		return OP_STOREI4_MEMBASE_IMM;
9378 	case OP_STOREI8_MEMBASE_REG:
9379 		return OP_STOREI8_MEMBASE_IMM;
		/* Any other store opcode is a caller bug. */
9381 		g_assert_not_reached ();
9387 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand (_IMM) variant of OPCODE: integer/long
 * ALU and compare ops, memory stores, and some x86/amd64-specific forms.
 * NOTE(review): the fallback for opcodes with no immediate form is outside
 * the lines shown here — confirm it against store_membase_reg_to_store_membase_imm,
 * which asserts on unknown opcodes.
 */
9390 mono_op_to_op_imm (int opcode)
		/* Integer div/rem/shift: unsigned variants map to *_UN_IMM. */
9400 		return OP_IDIV_UN_IMM;
9404 		return OP_IREM_UN_IMM;
9418 		return OP_ISHR_UN_IMM;
		/* Long (64-bit) shift counterpart. */
9435 		return OP_LSHR_UN_IMM;
		/* Native/int/long compares against a constant. */
9438 		return OP_COMPARE_IMM;
9440 		return OP_ICOMPARE_IMM;
9442 		return OP_LCOMPARE_IMM;
		/* Stores of a constant value: switch to the *_MEMBASE_IMM encodings. */
9444 	case OP_STORE_MEMBASE_REG:
9445 		return OP_STORE_MEMBASE_IMM;
9446 	case OP_STOREI1_MEMBASE_REG:
9447 		return OP_STOREI1_MEMBASE_IMM;
9448 	case OP_STOREI2_MEMBASE_REG:
9449 		return OP_STOREI2_MEMBASE_IMM;
9450 	case OP_STOREI4_MEMBASE_REG:
9451 		return OP_STOREI4_MEMBASE_IMM;
		/* x86/amd64 only: push and compare-with-memory take immediates too. */
9453 #if defined(__i386__) || defined (__x86_64__)
9455 		return OP_X86_PUSH_IMM;
9456 	case OP_X86_COMPARE_MEMBASE_REG:
9457 		return OP_X86_COMPARE_MEMBASE_IMM;
9459 #if defined(__x86_64__)
9460 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
9461 		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9463 	case OP_VOIDCALL_REG:
		/* Stack allocation with a constant size. */
9472 		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL LDIND_* (load indirect) opcode into the corresponding
 * typed OP_LOAD*_MEMBASE IR load. I/U distinguish sign/zero extension;
 * pointer-sized loads use the generic OP_LOAD_MEMBASE. Asserts on
 * anything that is not an LDIND opcode.
 */
9479 ldind_to_load_membase (int opcode)
9483 		return OP_LOADI1_MEMBASE;
9485 		return OP_LOADU1_MEMBASE;
9487 		return OP_LOADI2_MEMBASE;
9489 		return OP_LOADU2_MEMBASE;
9491 		return OP_LOADI4_MEMBASE;
9493 		return OP_LOADU4_MEMBASE;
		/* Native-int and object references are both pointer-sized loads. */
9495 		return OP_LOAD_MEMBASE;
9497 		return OP_LOAD_MEMBASE;
9499 		return OP_LOADI8_MEMBASE;
9501 		return OP_LOADR4_MEMBASE;
9503 		return OP_LOADR8_MEMBASE;
9505 		g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL STIND_* (store indirect) opcode into the corresponding
 * typed OP_STORE*_MEMBASE_REG IR store. Pointer-sized stores use the
 * generic OP_STORE_MEMBASE_REG. Asserts on anything that is not an
 * STIND opcode.
 */
9512 stind_to_store_membase (int opcode)
9516 		return OP_STOREI1_MEMBASE_REG;
9518 		return OP_STOREI2_MEMBASE_REG;
9520 		return OP_STOREI4_MEMBASE_REG;
		/* Native-int / object-reference stores are pointer-sized. */
9523 		return OP_STORE_MEMBASE_REG;
9525 		return OP_STOREI8_MEMBASE_REG;
9527 		return OP_STORER4_MEMBASE_REG;
9529 		return OP_STORER8_MEMBASE_REG;
9531 		g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Convert a base+offset load (OP_LOAD*_MEMBASE) into the absolute-address
 * form (OP_LOAD*_MEM) where the target architecture supports it. Only
 * implemented for x86/amd64, as the FIXME notes; 64-bit loads are only
 * available when pointers are 8 bytes.
 */
9538 mono_load_membase_to_load_mem (int opcode)
9540 	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9541 #if defined(__i386__) || defined(__x86_64__)
9543 	case OP_LOAD_MEMBASE:
9545 	case OP_LOADU1_MEMBASE:
9546 		return OP_LOADU1_MEM;
9547 	case OP_LOADU2_MEMBASE:
9548 		return OP_LOADU2_MEM;
9549 	case OP_LOADI4_MEMBASE:
9550 		return OP_LOADI4_MEM;
9551 	case OP_LOADU4_MEMBASE:
9552 		return OP_LOADU4_MEM;
9553 #if SIZEOF_VOID_P == 8
9554 	case OP_LOADI8_MEMBASE:
9555 		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Peephole helper: given an ALU op OPCODE whose result is written back to
 * memory via STORE_OPCODE, return the x86/amd64 read-modify-write
 * *_MEMBASE instruction that fuses the two. The store width must match
 * the architecture's natural operand sizes (checked by the guards below);
 * otherwise no fusion is done.
 */
9564 op_to_op_dest_membase (int store_opcode, int opcode)
9566 #if defined(__i386__)
		/* On x86 only pointer-sized / 32-bit stores can be fused. */
9567 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		/* Register-source ALU ops -> reg-to-membase RMW forms. */
9572 		return OP_X86_ADD_MEMBASE_REG;
9574 		return OP_X86_SUB_MEMBASE_REG;
9576 		return OP_X86_AND_MEMBASE_REG;
9578 		return OP_X86_OR_MEMBASE_REG;
9580 		return OP_X86_XOR_MEMBASE_REG;
		/* Immediate-source ALU ops -> imm-to-membase RMW forms. */
9583 		return OP_X86_ADD_MEMBASE_IMM;
9586 		return OP_X86_SUB_MEMBASE_IMM;
9589 		return OP_X86_AND_MEMBASE_IMM;
9592 		return OP_X86_OR_MEMBASE_IMM;
9595 		return OP_X86_XOR_MEMBASE_IMM;
9601 #if defined(__x86_64__)
		/* amd64 can additionally fuse 64-bit stores. */
9602 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		/* 32-bit ops reuse the X86_* RMW encodings... */
9607 		return OP_X86_ADD_MEMBASE_REG;
9609 		return OP_X86_SUB_MEMBASE_REG;
9611 		return OP_X86_AND_MEMBASE_REG;
9613 		return OP_X86_OR_MEMBASE_REG;
9615 		return OP_X86_XOR_MEMBASE_REG;
9617 		return OP_X86_ADD_MEMBASE_IMM;
9619 		return OP_X86_SUB_MEMBASE_IMM;
9621 		return OP_X86_AND_MEMBASE_IMM;
9623 		return OP_X86_OR_MEMBASE_IMM;
9625 		return OP_X86_XOR_MEMBASE_IMM;
		/* ...while 64-bit ops use the AMD64_* encodings. */
9627 		return OP_AMD64_ADD_MEMBASE_REG;
9629 		return OP_AMD64_SUB_MEMBASE_REG;
9631 		return OP_AMD64_AND_MEMBASE_REG;
9633 		return OP_AMD64_OR_MEMBASE_REG;
9635 		return OP_AMD64_XOR_MEMBASE_REG;
9638 		return OP_AMD64_ADD_MEMBASE_IMM;
9641 		return OP_AMD64_SUB_MEMBASE_IMM;
9644 		return OP_AMD64_AND_MEMBASE_IMM;
9647 		return OP_AMD64_OR_MEMBASE_IMM;
9650 		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Peephole helper: fuse a condition-set op whose 1-byte result is stored
 * via STORE_OPCODE into a single x86/amd64 SETcc-to-memory instruction.
 * Only byte stores (OP_STOREI1_MEMBASE_REG) can be fused, since SETcc
 * writes exactly one byte.
 */
9660 op_to_op_store_membase (int store_opcode, int opcode)
9662 #if defined(__i386__) || defined(__x86_64__)
9665 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
9666 			return OP_X86_SETEQ_MEMBASE;
9668 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
9669 			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Peephole helper: given that OPCODE's first source comes from a memory
 * load LOAD_OPCODE, return an x86/amd64 variant of OPCODE that reads the
 * operand directly from memory, eliminating the separate load. Returns
 * only when the load width matches what the fused instruction expects.
 */
9677 op_to_op_src1_membase (int load_opcode, int opcode)
9680 	/* FIXME: This has sign extension issues */
		/* Byte compare against an immediate: fuse even unsigned byte loads. */
9682 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9683 		return OP_X86_COMPARE_MEMBASE8_IMM;
		/* Otherwise only pointer-sized / 32-bit loads can be fused on x86. */
9686 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9691 		return OP_X86_PUSH_MEMBASE;
9692 	case OP_COMPARE_IMM:
9693 	case OP_ICOMPARE_IMM:
9694 		return OP_X86_COMPARE_MEMBASE_IMM;
9697 		return OP_X86_COMPARE_MEMBASE_REG;
		/* amd64 path below mirrors the x86 one, with 64-bit cases added. */
9702 	/* FIXME: This has sign extension issues */
9704 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9705 		return OP_X86_COMPARE_MEMBASE8_IMM;
9710 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9711 			return OP_X86_PUSH_MEMBASE;
		/* Deliberately disabled (see FIXME): imm compares are limited to
		   32-bit immediates on amd64, so the fusion is kept commented out. */
9713 		/* FIXME: This only works for 32 bit immediates
9714 	case OP_COMPARE_IMM:
9715 	case OP_LCOMPARE_IMM:
9716 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9717 			return OP_AMD64_COMPARE_MEMBASE_IMM;
9719 	case OP_ICOMPARE_IMM:
9720 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9721 			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		/* Register compares: pick 64- vs 32-bit form from the load width. */
9725 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9726 			return OP_AMD64_COMPARE_MEMBASE_REG;
9729 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9730 			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Peephole helper: like op_to_op_src1_membase, but for OPCODE's second
 * source operand. Returns the x86/amd64 reg-op-memory form of OPCODE when
 * the feeding load's width matches the instruction; otherwise no fusion.
 */
9739 op_to_op_src2_membase (int load_opcode, int opcode)
		/* x86: only pointer-sized / 32-bit loads can be folded. */
9742 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9748 		return OP_X86_COMPARE_REG_MEMBASE;
9750 		return OP_X86_ADD_REG_MEMBASE;
9752 		return OP_X86_SUB_REG_MEMBASE;
9754 		return OP_X86_AND_REG_MEMBASE;
9756 		return OP_X86_OR_REG_MEMBASE;
9758 		return OP_X86_XOR_REG_MEMBASE;
		/* amd64: choose 32-bit (X86_*/AMD64_I*) or 64-bit (AMD64_*) forms
		   based on the width of the feeding load. */
9765 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9766 			return OP_AMD64_ICOMPARE_REG_MEMBASE;
9770 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9771 			return OP_AMD64_COMPARE_REG_MEMBASE;
9774 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9775 			return OP_X86_ADD_REG_MEMBASE;
9777 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9778 			return OP_X86_SUB_REG_MEMBASE;
9780 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9781 			return OP_X86_AND_REG_MEMBASE;
9783 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9784 			return OP_X86_OR_REG_MEMBASE;
9786 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9787 			return OP_X86_XOR_REG_MEMBASE;
9789 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9790 			return OP_AMD64_ADD_REG_MEMBASE;
9792 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9793 			return OP_AMD64_SUB_REG_MEMBASE;
9795 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9796 			return OP_AMD64_AND_REG_MEMBASE;
9798 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9799 			return OP_AMD64_OR_REG_MEMBASE;
9801 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9802 			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses the conversion for opcodes whose
 * immediate form would be software-emulated on this architecture:
 * long shifts on 32-bit targets (unless the arch opts out via
 * MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS) and mul/div when emulated.
 * Everything else is delegated to mono_op_to_op_imm.
 */
9810 mono_op_to_op_imm_noemul (int opcode)
9813 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9818 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9826 		return mono_op_to_op_imm (opcode);
9833 * mono_handle_global_vregs:
9835 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9839 mono_handle_global_vregs (MonoCompile *cfg)
9845 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9847 #ifdef MONO_ARCH_SIMD_INTRINSICS
9848 if (cfg->uses_simd_intrinsics)
9849 mono_simd_simplify_indirection (cfg);
9852 /* Find local vregs used in more than one bb */
9853 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9854 MonoInst *ins = bb->code;
9855 int block_num = bb->block_num;
9857 if (cfg->verbose_level > 2)
9858 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9861 for (; ins; ins = ins->next) {
9862 const char *spec = INS_INFO (ins->opcode);
9863 int regtype, regindex;
9866 if (G_UNLIKELY (cfg->verbose_level > 2))
9867 mono_print_ins (ins);
9869 g_assert (ins->opcode >= MONO_CEE_LAST);
9871 for (regindex = 0; regindex < 3; regindex ++) {
9874 if (regindex == 0) {
9875 regtype = spec [MONO_INST_DEST];
9879 } else if (regindex == 1) {
9880 regtype = spec [MONO_INST_SRC1];
9885 regtype = spec [MONO_INST_SRC2];
9891 #if SIZEOF_VOID_P == 4
9892 if (regtype == 'l') {
9894 * Since some instructions reference the original long vreg,
9895 * and some reference the two component vregs, it is quite hard
9896 * to determine when it needs to be global. So be conservative.
9898 if (!get_vreg_to_inst (cfg, vreg)) {
9899 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9901 if (cfg->verbose_level > 2)
9902 printf ("LONG VREG R%d made global.\n", vreg);
9906 * Make the component vregs volatile since the optimizations can
9907 * get confused otherwise.
9909 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9910 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9914 g_assert (vreg != -1);
9916 prev_bb = vreg_to_bb [vreg];
9918 /* 0 is a valid block num */
9919 vreg_to_bb [vreg] = block_num + 1;
9920 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
9921 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9924 if (!get_vreg_to_inst (cfg, vreg)) {
9925 if (G_UNLIKELY (cfg->verbose_level > 2))
9926 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9930 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9933 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9936 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9939 g_assert_not_reached ();
9943 /* Flag as having been used in more than one bb */
9944 vreg_to_bb [vreg] = -1;
9950 /* If a variable is used in only one bblock, convert it into a local vreg */
9951 for (i = 0; i < cfg->num_varinfo; i++) {
9952 MonoInst *var = cfg->varinfo [i];
9953 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9955 switch (var->type) {
9961 #if SIZEOF_VOID_P == 8
9964 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9965 /* Enabling this screws up the fp stack on x86 */
9968 /* Arguments are implicitly global */
9969 /* Putting R4 vars into registers doesn't work currently */
9970 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
9972 * Make sure that the variable's liveness interval doesn't contain a call, since
9973 * that would cause the lvreg to be spilled, making the whole optimization
9976 /* This is too slow for JIT compilation */
9978 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9980 int def_index, call_index, ins_index;
9981 gboolean spilled = FALSE;
9986 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9987 const char *spec = INS_INFO (ins->opcode);
9989 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9990 def_index = ins_index;
9992 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9993 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9994 if (call_index > def_index) {
10000 if (MONO_IS_CALL (ins))
10001 call_index = ins_index;
10011 if (G_UNLIKELY (cfg->verbose_level > 2))
10012 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10013 var->flags |= MONO_INST_IS_DEAD;
10014 cfg->vreg_to_inst [var->dreg] = NULL;
10021 * Compress the varinfo and vars tables so the liveness computation is faster and
10022 * takes up less space.
10025 for (i = 0; i < cfg->num_varinfo; ++i) {
10026 MonoInst *var = cfg->varinfo [i];
10027 if (pos < i && cfg->locals_start == i)
10028 cfg->locals_start = pos;
10029 if (!(var->flags & MONO_INST_IS_DEAD)) {
10031 cfg->varinfo [pos] = cfg->varinfo [i];
10032 cfg->varinfo [pos]->inst_c0 = pos;
10033 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10034 cfg->vars [pos].idx = pos;
10035 #if SIZEOF_VOID_P == 4
10036 if (cfg->varinfo [pos]->type == STACK_I8) {
10037 /* Modify the two component vars too */
10040 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10041 var1->inst_c0 = pos;
10042 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10043 var1->inst_c0 = pos;
10050 cfg->num_varinfo = pos;
10051 if (cfg->locals_start > cfg->num_varinfo)
10052 cfg->locals_start = cfg->num_varinfo;
10056 * mono_spill_global_vars:
10058 * Generate spill code for variables which are not allocated to registers,
10059 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10060 * code is generated which could be optimized by the local optimization passes.
10063 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10065 MonoBasicBlock *bb;
10067 int orig_next_vreg;
10068 guint32 *vreg_to_lvreg;
10070 guint32 i, lvregs_len;
10071 gboolean dest_has_lvreg = FALSE;
10072 guint32 stacktypes [128];
10074 *need_local_opts = FALSE;
10076 memset (spec2, 0, sizeof (spec2));
10078 /* FIXME: Move this function to mini.c */
10079 stacktypes ['i'] = STACK_PTR;
10080 stacktypes ['l'] = STACK_I8;
10081 stacktypes ['f'] = STACK_R8;
10082 #ifdef MONO_ARCH_SIMD_INTRINSICS
10083 stacktypes ['x'] = STACK_VTYPE;
10086 #if SIZEOF_VOID_P == 4
10087 /* Create MonoInsts for longs */
10088 for (i = 0; i < cfg->num_varinfo; i++) {
10089 MonoInst *ins = cfg->varinfo [i];
10091 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10092 switch (ins->type) {
10093 #ifdef MONO_ARCH_SOFT_FLOAT
10099 g_assert (ins->opcode == OP_REGOFFSET);
10101 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10103 tree->opcode = OP_REGOFFSET;
10104 tree->inst_basereg = ins->inst_basereg;
10105 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10107 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10109 tree->opcode = OP_REGOFFSET;
10110 tree->inst_basereg = ins->inst_basereg;
10111 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10121 /* FIXME: widening and truncation */
10124 * As an optimization, when a variable allocated to the stack is first loaded into
10125 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10126 * the variable again.
10128 orig_next_vreg = cfg->next_vreg;
10129 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10130 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10133 /* Add spill loads/stores */
10134 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10137 if (cfg->verbose_level > 2)
10138 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10140 /* Clear vreg_to_lvreg array */
10141 for (i = 0; i < lvregs_len; i++)
10142 vreg_to_lvreg [lvregs [i]] = 0;
10146 MONO_BB_FOR_EACH_INS (bb, ins) {
10147 const char *spec = INS_INFO (ins->opcode);
10148 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10149 gboolean store, no_lvreg;
10151 if (G_UNLIKELY (cfg->verbose_level > 2))
10152 mono_print_ins (ins);
10154 if (ins->opcode == OP_NOP)
10158 * We handle LDADDR here as well, since it can only be decomposed
10159 * when variable addresses are known.
10161 if (ins->opcode == OP_LDADDR) {
10162 MonoInst *var = ins->inst_p0;
10164 if (var->opcode == OP_VTARG_ADDR) {
10165 /* Happens on SPARC/S390 where vtypes are passed by reference */
10166 MonoInst *vtaddr = var->inst_left;
10167 if (vtaddr->opcode == OP_REGVAR) {
10168 ins->opcode = OP_MOVE;
10169 ins->sreg1 = vtaddr->dreg;
10171 else if (var->inst_left->opcode == OP_REGOFFSET) {
10172 ins->opcode = OP_LOAD_MEMBASE;
10173 ins->inst_basereg = vtaddr->inst_basereg;
10174 ins->inst_offset = vtaddr->inst_offset;
10178 g_assert (var->opcode == OP_REGOFFSET);
10180 ins->opcode = OP_ADD_IMM;
10181 ins->sreg1 = var->inst_basereg;
10182 ins->inst_imm = var->inst_offset;
10185 *need_local_opts = TRUE;
10186 spec = INS_INFO (ins->opcode);
10189 if (ins->opcode < MONO_CEE_LAST) {
10190 mono_print_ins (ins);
10191 g_assert_not_reached ();
10195 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10199 if (MONO_IS_STORE_MEMBASE (ins)) {
10200 tmp_reg = ins->dreg;
10201 ins->dreg = ins->sreg2;
10202 ins->sreg2 = tmp_reg;
10205 spec2 [MONO_INST_DEST] = ' ';
10206 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10207 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10209 } else if (MONO_IS_STORE_MEMINDEX (ins))
10210 g_assert_not_reached ();
10215 if (G_UNLIKELY (cfg->verbose_level > 2))
10216 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10221 regtype = spec [MONO_INST_DEST];
10222 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10225 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10226 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10227 MonoInst *store_ins;
10230 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10232 if (var->opcode == OP_REGVAR) {
10233 ins->dreg = var->dreg;
10234 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10236 * Instead of emitting a load+store, use a _membase opcode.
10238 g_assert (var->opcode == OP_REGOFFSET);
10239 if (ins->opcode == OP_MOVE) {
10242 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10243 ins->inst_basereg = var->inst_basereg;
10244 ins->inst_offset = var->inst_offset;
10247 spec = INS_INFO (ins->opcode);
10251 g_assert (var->opcode == OP_REGOFFSET);
10253 prev_dreg = ins->dreg;
10255 /* Invalidate any previous lvreg for this vreg */
10256 vreg_to_lvreg [ins->dreg] = 0;
10260 #ifdef MONO_ARCH_SOFT_FLOAT
10261 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10263 store_opcode = OP_STOREI8_MEMBASE_REG;
10267 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10269 if (regtype == 'l') {
10270 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10271 mono_bblock_insert_after_ins (bb, ins, store_ins);
10272 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10273 mono_bblock_insert_after_ins (bb, ins, store_ins);
10276 g_assert (store_opcode != OP_STOREV_MEMBASE);
10278 /* Try to fuse the store into the instruction itself */
10279 /* FIXME: Add more instructions */
10280 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10281 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10282 ins->inst_imm = ins->inst_c0;
10283 ins->inst_destbasereg = var->inst_basereg;
10284 ins->inst_offset = var->inst_offset;
10285 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10286 ins->opcode = store_opcode;
10287 ins->inst_destbasereg = var->inst_basereg;
10288 ins->inst_offset = var->inst_offset;
10292 tmp_reg = ins->dreg;
10293 ins->dreg = ins->sreg2;
10294 ins->sreg2 = tmp_reg;
10297 spec2 [MONO_INST_DEST] = ' ';
10298 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10299 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10301 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10302 // FIXME: The backends expect the base reg to be in inst_basereg
10303 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10305 ins->inst_basereg = var->inst_basereg;
10306 ins->inst_offset = var->inst_offset;
10307 spec = INS_INFO (ins->opcode);
10309 /* printf ("INS: "); mono_print_ins (ins); */
10310 /* Create a store instruction */
10311 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10313 /* Insert it after the instruction */
10314 mono_bblock_insert_after_ins (bb, ins, store_ins);
10317 * We can't assign ins->dreg to var->dreg here, since the
10318 * sregs could use it. So set a flag, and do it after
10321 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10322 dest_has_lvreg = TRUE;
10331 for (srcindex = 0; srcindex < 2; ++srcindex) {
10332 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10333 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10335 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10336 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10337 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10338 MonoInst *load_ins;
10339 guint32 load_opcode;
10341 if (var->opcode == OP_REGVAR) {
10343 ins->sreg1 = var->dreg;
10345 ins->sreg2 = var->dreg;
10349 g_assert (var->opcode == OP_REGOFFSET);
10351 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10353 g_assert (load_opcode != OP_LOADV_MEMBASE);
10355 if (vreg_to_lvreg [sreg]) {
10356 /* The variable is already loaded to an lvreg */
10357 if (G_UNLIKELY (cfg->verbose_level > 2))
10358 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10360 ins->sreg1 = vreg_to_lvreg [sreg];
10362 ins->sreg2 = vreg_to_lvreg [sreg];
10366 /* Try to fuse the load into the instruction */
10367 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10368 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10369 ins->inst_basereg = var->inst_basereg;
10370 ins->inst_offset = var->inst_offset;
10371 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10372 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10373 ins->sreg2 = var->inst_basereg;
10374 ins->inst_offset = var->inst_offset;
10376 if (MONO_IS_REAL_MOVE (ins)) {
10377 ins->opcode = OP_NOP;
10380 //printf ("%d ", srcindex); mono_print_ins (ins);
10382 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10384 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10385 if (var->dreg == prev_dreg) {
10387 * sreg refers to the value loaded by the load
10388 * emitted below, but we need to use ins->dreg
10389 * since it refers to the store emitted earlier.
10393 vreg_to_lvreg [var->dreg] = sreg;
10394 g_assert (lvregs_len < 1024);
10395 lvregs [lvregs_len ++] = var->dreg;
10404 if (regtype == 'l') {
10405 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10406 mono_bblock_insert_before_ins (bb, ins, load_ins);
10407 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10408 mono_bblock_insert_before_ins (bb, ins, load_ins);
10411 #if SIZEOF_VOID_P == 4
10412 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10414 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10415 mono_bblock_insert_before_ins (bb, ins, load_ins);
10421 if (dest_has_lvreg) {
10422 vreg_to_lvreg [prev_dreg] = ins->dreg;
10423 g_assert (lvregs_len < 1024);
10424 lvregs [lvregs_len ++] = prev_dreg;
10425 dest_has_lvreg = FALSE;
10429 tmp_reg = ins->dreg;
10430 ins->dreg = ins->sreg2;
10431 ins->sreg2 = tmp_reg;
10434 if (MONO_IS_CALL (ins)) {
10435 /* Clear vreg_to_lvreg array */
10436 for (i = 0; i < lvregs_len; i++)
10437 vreg_to_lvreg [lvregs [i]] = 0;
10441 if (cfg->verbose_level > 2)
10442 mono_print_ins_index (1, ins);
10449 * - use 'iadd' instead of 'int_add'
10450 * - handling ovf opcodes: decompose in method_to_ir.
10451 * - unify iregs/fregs
10452 * -> partly done, the missing parts are:
10453 * - a more complete unification would involve unifying the hregs as well, so
10454 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10455 * would no longer map to the machine hregs, so the code generators would need to
10456 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10457 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10458 * fp/non-fp branches speeds it up by about 15%.
10459 * - use sext/zext opcodes instead of shifts
10461 * - get rid of TEMPLOADs if possible and use vregs instead
10462 * - clean up usage of OP_P/OP_ opcodes
10463 * - cleanup usage of DUMMY_USE
10464 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10466 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10467 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10468 * - make sure handle_stack_args () is called before the branch is emitted
10469 * - when the new IR is done, get rid of all unused stuff
10470 * - COMPARE/BEQ as separate instructions or unify them ?
10471 * - keeping them separate allows specialized compare instructions like
10472 * compare_imm, compare_membase
10473 * - most back ends unify fp compare+branch, fp compare+ceq
10474 * - integrate mono_save_args into inline_method
10475 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10476 * - handle long shift opts on 32 bit platforms somehow: they require
10477 * 3 sregs (2 for arg1 and 1 for arg2)
10478 * - make byref a 'normal' type.
10479 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10480 * variable if needed.
10481 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10482 * like inline_method.
10483 * - remove inlining restrictions
10484 * - fix LNEG and enable cfold of INEG
10485 * - generalize x86 optimizations like ldelema as a peephole optimization
10486 * - add store_mem_imm for amd64
10487 * - optimize the loading of the interruption flag in the managed->native wrappers
10488 * - avoid special handling of OP_NOP in passes
10489 * - move code inserting instructions into one function/macro.
10490 * - try a coalescing phase after liveness analysis
10491 * - add float -> vreg conversion + local optimizations on !x86
10492 * - figure out how to handle decomposed branches during optimizations, ie.
10493 * compare+branch, op_jump_table+op_br etc.
10494 * - promote RuntimeXHandles to vregs
10495 * - vtype cleanups:
10496 * - add a NEW_VARLOADA_VREG macro
10497 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10498 * accessing vtype fields.
10499 * - get rid of I8CONST on 64 bit platforms
10500 * - dealing with the increase in code size due to branches created during opcode
10502 * - use extended basic blocks
10503 * - all parts of the JIT
10504 * - handle_global_vregs () && local regalloc
10505 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10506 * - sources of increase in code size:
10509 * - isinst and castclass
10510 * - lvregs not allocated to global registers even if used multiple times
10511 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10513 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10514 * - add all micro optimizations from the old JIT
10515 * - put tree optimizations into the deadce pass
10516 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10517 * specific function.
10518 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10519 * fcompare + branchCC.
10520 * - create a helper function for allocating a stack slot, taking into account
10521 * MONO_CFG_HAS_SPILLUP.
10522 * - merge new GC changes in mini.c.
10524 * - merge the ia64 switch changes.
10525 * - merge the mips conditional changes.
10526 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10527 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10528 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10529 * - optimize mono_regstate2_alloc_int/float.
10530 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10531 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10532 * parts of the tree could be separated by other instructions, killing the tree
10533 * arguments, or stores killing loads etc. Also, should we fold loads into other
10534 * instructions if the result of the load is used multiple times ?
10535 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10536 * - LAST MERGE: 108395.
10537 * - when returning vtypes in registers, generate IR and append it to the end of the
10538 * last bb instead of doing it in the epilog.
10539 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10540 * ones in inssel.h.
10541 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10549 - When to decompose opcodes:
10550 - earlier: this makes some optimizations hard to implement, since the low level IR
10551 no longer contains the necessary information. But it is easier to do.
10552 - later: harder to implement, enables more optimizations.
10553 - Branches inside bblocks:
10554 - created when decomposing complex opcodes.
10555 - branches to another bblock: harmless, but not tracked by the branch
10556 optimizations, so need to branch to a label at the start of the bblock.
10557 - branches to inside the same bblock: very problematic, trips up the local
10558 reg allocator. Can be fixed by splitting the current bblock, but that is a
10559 complex operation, since some local vregs can become global vregs etc.
10560 - Local/global vregs:
10561 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10562 local register allocator.
10563 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10564 structure, created by mono_create_var (). Assigned to hregs or the stack by
10565 the global register allocator.
10566 - When to do optimizations like alu->alu_imm:
10567 - earlier -> saves work later on since the IR will be smaller/simpler
10568 - later -> can work on more instructions
10569 - Handling of valuetypes:
10570 - When a vtype is pushed on the stack, a new temporary is created, an
10571 instruction computing its address (LDADDR) is emitted and pushed on
10572 the stack. Need to optimize cases when the vtype is used immediately as in
10573 argument passing, stloc etc.
10574 - Instead of the to_end stuff in the old JIT, simply call the function handling
10575 the values on the stack before emitting the last instruction of the bb.
10578 #endif /* DISABLE_JIT */