2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
/*
 * NOTE(review): this chunk appears to be a partial extraction of the
 * original file -- every line carries a stray original line number and
 * interior lines of most definitions (macro closers, braces, breaks)
 * are missing.  All comments in this file are hedged accordingly.
 *
 * Inlining/branch-cost tuning constants, and failure-path macros for the
 * CIL conversion loop.  Each *_FAILURE macro records an exception type
 * (and message, where applicable) on the MonoCompile and jumps to the
 * method-local `exception_exit' label; they rely on `cfg', `method',
 * `cil_method'/`field' being in scope at the expansion site.  The
 * closing "} while (0)" of each macro is elided in this extraction.
 */
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
/*
 * Forward declarations, helper-trampoline signature externs, and the
 * instruction metadata tables generated by expanding MINI_OP over
 * mini-ops.h.  MONO_INIT_VARINFO initializes liveness range info for a
 * variable (its body is partially elided in this extraction).
 */
113 /* Determine whenever 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 extern MonoMethodSignature *helper_sig_class_init_trampoline;
132 extern MonoMethodSignature *helper_sig_domain_get;
133 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the static per-kind vreg allocators: integer,
 * float, pointer-sized, and (for mono_alloc_dreg) a destination register
 * whose kind is chosen from the eval-stack type.  Return types and
 * braces appear to be elided by the extraction.
 */
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
/*
 * Maps a MonoType to the move opcode used when copying a value of that
 * type between registers.  Enums are unwrapped to their underlying basic
 * type and generic instances to the container class before re-checking;
 * shared generic params are only asserted valid under a generic sharing
 * context.  Unknown type codes abort via g_error.  Many case bodies and
 * return statements are elided in this extraction.
 */
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
241 type = &type->data.generic_class->container_class->byval_arg;
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: prints a basic block's number, its incoming and outgoing
 * edges (block number plus depth-first number), and then every
 * instruction in the block via mono_print_ins_index (), prefixed by
 * `msg'.
 */
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
/*
 * Basic-block lookup/creation macros and array bounds-check emission.
 * GET_BBLOCK maps an IL offset to its bblock, creating and registering
 * one on demand (rejecting out-of-range IL via UNVERIFIED).  CHECK_BBLOCK
 * queues backward-branch targets with no code yet for re-checking.
 * MONO_ARCH_EMIT_BOUNDS_CHECK here is only the generic fallback: it
 * loads the array length and emits a LE_UN conditional exception.  With
 * MONO_OPT_ABCREM enabled, MONO_EMIT_BOUNDS_CHECK instead emits an
 * OP_BOUNDS_CHECK pseudo-op so the array-bounds-check-removal pass can
 * eliminate redundant checks later.  Macro closers are elided here.
 */
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
282 #define CHECK_BBLOCK(target,ip,tblock) do { \
283 if ((target) < (ip) && !(tblock)->code) { \
284 bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
285 if (cfg->verbose_level > 2) printf ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
289 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
290 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
291 int _length_reg = alloc_ireg (cfg); \
292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
293 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
294 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
298 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
299 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
300 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
303 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
304 ins->sreg1 = array_reg; \
305 ins->sreg2 = index_reg; \
306 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
307 MONO_ADD_INS ((cfg)->cbb, ins); \
308 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
309 (cfg)->cbb->has_array_access = TRUE; \
/*
 * x86/amd64-only helper that builds an OP_X86_LEA instruction from two
 * source regs, a shift amount and an immediate, and ADD_WIDEN_OP, which
 * on 64-bit targets sign-extends an I4 operand that is mixed with a
 * pointer-sized operand in a binary op (defined as a no-op on 32-bit,
 * see the empty definition below).
 */
313 #if defined(__i386__) || defined(__x86_64__)
314 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
315 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
316 (dest)->dreg = alloc_preg ((cfg)); \
317 (dest)->sreg1 = (sr1); \
318 (dest)->sreg2 = (sr2); \
319 (dest)->inst_imm = (imm); \
320 (dest)->backend.shift_amount = (shift); \
321 MONO_ADD_INS ((cfg)->cbb, (dest)); \
325 #if SIZEOF_VOID_P == 8
326 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
327 /* FIXME: Need to add many more cases */ \
328 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
330 int dr = alloc_preg (cfg); \
331 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
332 (ins)->sreg2 = widen->dreg; \
336 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * Stack-machine helpers used by the main CIL decode loop (they expect
 * `cfg', `ins', `sp', `bblock', `ip', `target' etc. in scope at the
 * expansion site).  ADD_BINOP/ADD_UNOP take operands from the eval
 * stack `sp', type-check and specialize the opcode via type_from_op (),
 * allocate a destination reg, append the instruction and decompose it.
 * ADD_BINCOND builds an OP_COMPARE plus a conditional branch, resolving
 * the true branch to `target' and the false branch either to the given
 * next_block or to the bblock at the following ip, spilling any leftover
 * stack items first via handle_stack_args ().  Macro closers elided.
 */
339 #define ADD_BINOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 ins->sreg2 = sp [1]->dreg; \
344 type_from_op (ins, sp [0], sp [1]); \
346 /* Have to insert a widening op */ \
347 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
348 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 mono_decompose_opcode ((cfg), (ins)); \
354 #define ADD_UNOP(op) do { \
355 MONO_INST_NEW (cfg, ins, (op)); \
357 ins->sreg1 = sp [0]->dreg; \
358 type_from_op (ins, sp [0], NULL); \
360 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
361 MONO_ADD_INS ((cfg)->cbb, (ins)); \
363 mono_decompose_opcode (cfg, ins); \
366 #define ADD_BINCOND(next_block) do { \
369 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
370 cmp->sreg1 = sp [0]->dreg; \
371 cmp->sreg2 = sp [1]->dreg; \
372 type_from_op (cmp, sp [0], sp [1]); \
374 type_from_op (ins, sp [0], sp [1]); \
375 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
376 GET_BBLOCK (cfg, tblock, target); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_true_bb = tblock; \
379 CHECK_BBLOCK (target, ip, tblock); \
380 if ((next_block)) { \
381 link_bblock (cfg, bblock, (next_block)); \
382 ins->inst_false_bb = (next_block); \
383 start_new_bblock = 1; \
385 GET_BBLOCK (cfg, tblock, ip); \
386 link_bblock (cfg, bblock, tblock); \
387 ins->inst_false_bb = tblock; \
388 start_new_bblock = 2; \
390 if (sp != stack_start) { \
391 handle_stack_args (cfg, stack_start, sp - stack_start); \
392 CHECK_UNVERIFIABLE (cfg); \
394 MONO_ADD_INS (bblock, cmp); \
395 MONO_ADD_INS (bblock, ins); \
/* NOTE(review): the opening delimiter of the following doc comment was
 * apparently dropped by extraction.  link_bblock () is idempotent: the
 * duplicate-edge scans below skip the update when the edge already
 * exists; otherwise the out_bb/in_bb arrays are re-allocated from the
 * compile mempool one entry larger (never freed individually -- mempool
 * lifetime).  mono_link_bblock () is the public wrapper. */
399 * link_bblock: Links two basic blocks
401 * links two basic blocks in the control flow graph, the 'from'
402 * argument is the starting block and the 'to' argument is the block
403 * the control flow ends to after 'from'.
406 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
408 MonoBasicBlock **newa;
412 if (from->cil_code) {
414 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
416 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
419 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
421 printf ("edge from entry to exit\n");
426 for (i = 0; i < from->out_count; ++i) {
427 if (to == from->out_bb [i]) {
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
434 for (i = 0; i < from->out_count; ++i) {
435 newa [i] = from->out_bb [i];
443 for (i = 0; i < to->in_count; ++i) {
444 if (from == to->in_bb [i]) {
450 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
451 for (i = 0; i < to->in_count; ++i) {
452 newa [i] = to->in_bb [i];
461 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
463 link_bblock (cfg, from, to);
/* NOTE(review): opening delimiter of the doc comment below elided by
 * extraction.  The region token is ((clause_index + 1) << 8) combined
 * with a MONO_REGION_* type and the clause flags; filters and handlers
 * are matched before try ranges so the innermost classification wins.
 * The fall-through "-1 / no region" return mentioned in the doc comment
 * is elided in this extraction. */
467 * mono_find_block_region:
469 * We mark each basic block with a region ID. We use that to avoid BB
470 * optimizations when blocks are in different regions.
473 * A region token that encodes where this region is, and information
474 * about the clause owner for this block.
476 * The region encodes the try/catch/filter clause that owns this block
477 * as well as the type. -1 is a special value that represents a block
478 * that is in none of try/catch/filter.
481 mono_find_block_region (MonoCompile *cfg, int offset)
483 MonoMethod *method = cfg->method;
484 MonoMethodHeader *header = mono_method_get_header (method);
485 MonoExceptionClause *clause;
488 /* first search for handlers and filters */
489 for (i = 0; i < header->num_clauses; ++i) {
490 clause = &header->clauses [i];
491 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
492 (offset < (clause->handler_offset)))
493 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
495 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
496 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
497 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
498 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
499 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
501 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
505 /* search the try blocks */
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
508 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
509 return ((i + 1) << 8) | clause->flags;
/*
 * Collects, into a GList, the handler bblocks of all clauses of kind
 * `type' whose protected range contains `ip' but not `target' -- i.e.
 * the handlers (typically finally/fault) that must be run when control
 * branches from `ip' out of their protected region to `target'.
 */
516 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
518 MonoMethod *method = cfg->method;
519 MonoMethodHeader *header = mono_method_get_header (method);
520 MonoExceptionClause *clause;
521 MonoBasicBlock *handler;
525 for (i = 0; i < header->num_clauses; ++i) {
526 clause = &header->clauses [i];
527 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
528 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
529 if (clause->flags == type) {
530 handler = cfg->cil_offset_to_bb [clause->handler_offset];
532 res = g_list_append (res, handler);
/*
 * Lazily-created, cached per-compile helper variables for exception
 * handling: a stack-pointer variable per EH region (cached in
 * cfg->spvars) and an exception object variable per IL offset (cached
 * in cfg->exvars).  Both are flagged MONO_INST_INDIRECT so the register
 * allocator keeps them on the stack.  mono_find_exvar_for_offset () is
 * the lookup-only variant.
 */
540 mono_create_spvar_for_region (MonoCompile *cfg, int region)
544 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
548 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
556 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
558 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
566 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
570 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
571 /* prevent it from being register allocated */
572 var->flags |= MONO_INST_INDIRECT;
574 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * find_previous: linear scan over all bblocks for the one whose
 * cil_code is the closest address strictly before `code' (falling back
 * to `start').
 *
 * split_bblock: splits `first' at second->cil_code -- the out-edges and
 * the instructions from the split point onward are moved to `second',
 * predecessors of those successors are re-pointed at `second', and the
 * two halves are re-linked.  See the FIXME: targets reached from more
 * than one bblock are not fully handled.  A warning is printed if no
 * instruction straddling the split point is found.
 */
579 static MonoBasicBlock*
580 find_previous (MonoBasicBlock **bblocks, guint32 n_bblocks, MonoBasicBlock *start, const guchar *code)
582 MonoBasicBlock *best = start;
585 for (i = 0; i < n_bblocks; ++i) {
587 MonoBasicBlock *bb = bblocks [i];
589 if (bb->cil_code && bb->cil_code < code && bb->cil_code > best->cil_code)
598 split_bblock (MonoCompile *cfg, MonoBasicBlock *first, MonoBasicBlock *second) {
607 * FIXME: take into account all the details:
608 * second may have been the target of more than one bblock
610 second->out_count = first->out_count;
611 second->out_bb = first->out_bb;
613 for (i = 0; i < first->out_count; ++i) {
614 bb = first->out_bb [i];
615 for (j = 0; j < bb->in_count; ++j) {
616 if (bb->in_bb [j] == first)
617 bb->in_bb [j] = second;
621 first->out_count = 0;
622 first->out_bb = NULL;
623 link_bblock (cfg, first, second);
625 second->last_ins = first->last_ins;
627 /*printf ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
628 for (inst = first->code; inst && inst->next; inst = inst->next) {
629 /*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
630 printf ("found %p: %s", inst->next->cil_code, code);
632 if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
633 second->code = inst->next;
635 first->last_ins = inst;
636 second->next_bb = first->next_bb;
637 first->next_bb = second;
642 g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
/* NOTE(review): opening delimiter of the doc comment below elided by
 * extraction.  Sets inst->type (STACK_* kind) and inst->klass for a
 * value of `type' loaded onto the eval stack.  Byrefs become STACK_MP;
 * enums and generic insts are unwrapped and re-dispatched; VAR/MVAR
 * type params are treated as references, valid only under a generic
 * sharing context.  Unknown type codes abort via g_error.  Case labels
 * and break statements are partially elided in this extraction. */
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
657 inst->type = STACK_MP;
658 inst->klass = mono_defaults.object_class;
662 inst->klass = klass = mono_class_from_mono_type (type);
665 switch (type->type) {
667 inst->type = STACK_INV;
671 case MONO_TYPE_BOOLEAN:
677 inst->type = STACK_I4;
682 case MONO_TYPE_FNPTR:
683 inst->type = STACK_PTR;
685 case MONO_TYPE_CLASS:
686 case MONO_TYPE_STRING:
687 case MONO_TYPE_OBJECT:
688 case MONO_TYPE_SZARRAY:
689 case MONO_TYPE_ARRAY:
690 inst->type = STACK_OBJ;
694 inst->type = STACK_I8;
698 inst->type = STACK_R8;
700 case MONO_TYPE_VALUETYPE:
701 if (type->data.klass->enumtype) {
702 type = type->data.klass->enum_basetype;
706 inst->type = STACK_VTYPE;
709 case MONO_TYPE_TYPEDBYREF:
710 inst->klass = mono_defaults.typed_reference_class;
711 inst->type = STACK_VTYPE;
713 case MONO_TYPE_GENERICINST:
714 type = &type->data.generic_class->container_class->byval_arg;
717 case MONO_TYPE_MVAR :
718 /* FIXME: all the arguments must be references for now,
719 * later look inside cfg and see if the arg num is
722 g_assert (cfg->generic_sharing_context);
723 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
/* NOTE(review): opening delimiter of the doc comment below elided by
 * extraction.  IL-validation lookup tables, all indexed by STACK_*
 * kinds: result type of binary numeric/integer ops, comparison
 * compatibility (non-zero = comparable; 2/3/4 encode special-cased
 * pointer/reference combinations), and shift result types.  The
 * *_op_map arrays hold per-stack-type opcode DELTAS: adding the entry
 * for an operand's stack type to a generic CIL opcode yields the
 * type-specific IR opcode (see type_from_op ()).
 */
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/* NOTE(review): opening delimiter of the doc comment below elided by
 * extraction.  type_from_op () is the central type-checker of the CIL
 * conversion: given up to two source operands it sets ins->type for the
 * eval stack from the validation tables above and specializes
 * ins->opcode by adding the matching *_op_map delta.  STACK_INV marks
 * invalid IL.  Many case labels, break statements and #else/#endif
 * lines are elided in this extraction, so the visible fall-through
 * structure is not the original one. */
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_MOVE;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
/*
 * A table fragment mapping type codes to STACK_* kinds, the (mostly
 * elided) param_table, and check_values_to_signature (): a best-effort
 * check that concrete argument MonoInsts are compatible with a method
 * signature (byref-ness, reference vs. value kinds, float widths).
 * NOTE(review): large parts of this function body, including its return
 * statements, are elided in this extraction.
 */
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
/* NOTE(review): opening delimiter of the doc comment below elided by
 * extraction.  Three lazily-created singleton compile variables:
 * the cached MonoDomain* (domainvar), the GOT address variable used
 * when compiling AOT on MONO_ARCH_NEED_GOT_VAR targets (got_var), and
 * the rgctx/vtable variable for generic sharing (rgctx_var, forced to
 * the stack via MONO_INST_INDIRECT). */
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1143 inline static MonoInst *
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1169 return cfg->rgctx_var;
/*
 * Conversions between eval-stack kinds and MonoTypes.
 * type_from_stack_type () picks the canonical MonoType for an
 * instruction's STACK_* kind (using ins->klass for managed pointers and
 * value types); unhandled kinds abort via g_error.
 * type_to_stack_type () maps a MonoType (after unwrapping to its
 * underlying type) to its STACK_* kind.  Several case labels and return
 * statements are elided in this extraction.
 */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to montype not handled\n", ins->type);
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 switch (mono_type_get_underlying_type (t)->type) {
1195 case MONO_TYPE_BOOLEAN:
1198 case MONO_TYPE_CHAR:
1205 case MONO_TYPE_FNPTR:
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_VALUETYPE:
1220 case MONO_TYPE_TYPEDBYREF:
1222 case MONO_TYPE_GENERICINST:
1223 if (mono_type_generic_inst_is_valuetype (t))
1229 g_assert_not_reached ();
/*
 * Maps an ldelem/stelem CIL opcode to the MonoClass of the array
 * element it accesses (byte, int16, object, etc.).  Unhandled opcodes
 * hit g_assert_not_reached ().  Most case labels are elided in this
 * extraction; only the LDELEM_REF/STELEM_REF pair is visible.
 */
1236 array_access_to_klass (int opcode)
1240 return mono_defaults.byte_class;
1242 return mono_defaults.uint16_class;
1245 return mono_defaults.int_class;
1248 return mono_defaults.sbyte_class;
1251 return mono_defaults.int16_class;
1254 return mono_defaults.int32_class;
1256 return mono_defaults.uint32_class;
1259 return mono_defaults.int64_class;
1262 return mono_defaults.single_class;
1265 return mono_defaults.double_class;
1266 case CEE_LDELEM_REF:
1267 case CEE_STELEM_REF:
1268 return mono_defaults.object_class;
1270 g_assert_not_reached ();
/* NOTE(review): opening delimiter of the doc comment below elided by
 * extraction.  mono_compile_get_interface_var () returns a local used to
 * carry an eval-stack slot across bblock boundaries, reusing a cached
 * var from cfg->intvars keyed by (stack type, slot) when possible;
 * slots beyond max_stack (from inlining) always get a fresh var.
 * mono_save_token_info () records image/token for `key' in
 * cfg->token_info_hash, but only when compiling AOT. */
1276 * We try to share variables when possible
1279 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1284 /* inlining can result in deeper stacks */
1285 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1286 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1288 pos = ins->type - 1 + slot * STACK_MAX;
1290 switch (ins->type) {
1297 if ((vnum = cfg->intvars [pos]))
1298 return cfg->varinfo [vnum];
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1300 cfg->intvars [pos] = res->inst_c0;
1303 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1309 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1311 if (cfg->compile_aot) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single joint point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the joint point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/*
 * First exit from this bblock: record the out-stack depth and pick the
 * variables to use -- either a successor's existing in_stack, or a freshly
 * allocated array of locals.
 */
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
/* a successor already has an in_stack: share its variables */
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpose, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/*
 * Propagate our out_stack to successors that do not have an in_stack yet;
 * a successor with a different stack depth makes the method unverifiable.
 */
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
1400 locals = bb->out_stack;
/* spill every stack item to its temp and replace it on the stack */
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
/* repeat the stores for each distinct in_stack among the successors */
1428 if (outb->in_stack != locals) {
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
/* the interface id is not known at AOT compile time: patch it in as an
 * adjusted-IID constant and compute the slot address at runtime */
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the slot for interface_id sits at a fixed negative offset before the vtable */
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1465 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1466 * stored in "klass_reg" implements the interface "klass".
1469 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1471 int ibitmap_reg = alloc_preg (cfg);
1472 int ibitmap_byte_reg = alloc_preg (cfg);
/* load MonoClass::interface_bitmap; bit interface_id tells whether klass is implemented */
1474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1476 if (cfg->compile_aot) {
/* AOT: the iid is a patched constant, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) at runtime */
1477 int iid_reg = alloc_preg (cfg);
1478 int shifted_iid_reg = alloc_preg (cfg);
1479 int ibitmap_byte_address_reg = alloc_preg (cfg);
1480 int masked_iid_reg = alloc_preg (cfg);
1481 int iid_one_bit_reg = alloc_preg (cfg);
1482 int iid_bit_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1488 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1489 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1490 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is known, so byte offset and bit mask fold into immediates */
1492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1498 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1499 * stored in "vtable_reg" implements the interface "klass".
/* Same bitmap test as mini_emit_load_intf_bit_reg_class (), but the bitmap
 * is read from a MonoVTable instead of a MonoClass. */
1502 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1504 int ibitmap_reg = alloc_preg (cfg);
1505 int ibitmap_byte_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1509 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime */
1510 int iid_reg = alloc_preg (cfg);
1511 int shifted_iid_reg = alloc_preg (cfg);
1512 int ibitmap_byte_address_reg = alloc_preg (cfg);
1513 int masked_iid_reg = alloc_preg (cfg);
1514 int iid_one_bit_reg = alloc_preg (cfg);
1515 int iid_bit_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1521 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1522 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1523 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: fold the byte offset and bit mask into immediates */
1525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1531 * Emit code which checks whether the interface id of @klass is smaller
1532 * than the value given by max_iid_reg.
1535 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1536 MonoBasicBlock *false_target)
1538 if (cfg->compile_aot) {
/* AOT: the interface id is a patched constant, compare against a register */
1539 int iid_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* on max_iid < iid: branch to false_target, or raise InvalidCastException
 * (the selecting condition between the two is not visible in this chunk) */
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1551 /* Same as above, but obtains max_iid from a vtable */
1553 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1554 MonoBasicBlock *false_target)
1556 int max_iid_reg = alloc_preg (cfg);
/* read MonoVTable::max_interface_id and delegate to the generic check */
1558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1559 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1562 /* Same as above, but obtains max_iid from a klass */
1564 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1565 MonoBasicBlock *false_target)
1567 int max_iid_reg = alloc_preg (cfg);
/* read MonoClass::max_interface_id and delegate to the generic check */
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1570 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an isinst-style subclass test: check the supertypes array of the
 * class in KLASS_REG at klass's inheritance depth and branch to TRUE_TARGET
 * on a match. The idepth guard branches to FALSE_TARGET when the candidate
 * hierarchy is too shallow to contain klass.
 */
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int idepth_reg = alloc_preg (cfg);
1577 int stypes_reg = alloc_preg (cfg);
1578 int stype = alloc_preg (cfg);
/* only need the explicit depth check when idepth exceeds the statically
 * allocated supertable size */
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* load supertypes [idepth - 1] and compare it with klass */
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1587 if (cfg->compile_aot) {
1588 int const_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1590 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the vtable in VTABLE_REG: bound the
 * interface id, test the interface bitmap bit, then either branch to
 * TRUE_TARGET or raise InvalidCastException when the bit is clear.
 */
1598 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1600 int intf_reg = alloc_preg (cfg);
1602 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1603 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1608 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1612 * Variant of the above that takes a register to the class, not the vtable.
1615 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1617 int intf_bit_reg = alloc_preg (cfg);
/* same as mini_emit_iface_cast (), but reads iid bound and bitmap from a MonoClass */
1619 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1620 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1625 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 *   Emit an exact-class equality check: raise InvalidCastException unless
 * the class in KLASS_REG is exactly KLASS.
 */
1629 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1631 if (cfg->compile_aot) {
/* AOT cannot embed the MonoClass pointer directly; use a patched constant */
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in KLASS_REG with KLASS and branch to TARGET using
 * BRANCH_OP (e.g. OP_PBEQ/OP_PBNE_UN) instead of raising an exception.
 */
1642 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1644 if (cfg->compile_aot) {
1645 int const_reg = alloc_preg (cfg);
1646 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 *   Emit a castclass check of the object in OBJ_REG (class in KLASS_REG)
 * against KLASS, raising InvalidCastException on failure. Array casts check
 * the rank and recurse on the element (cast) class, with special handling
 * for enum element types; non-array casts walk the supertypes table.
 * NOTE(review): some original lines (branch structure around the array/
 * non-array split) are missing from this chunk.
 */
1655 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1658 int rank_reg = alloc_preg (cfg);
1659 int eclass_reg = alloc_preg (cfg);
/* ranks must match exactly */
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1663 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1664 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* element-type dispatch: enums cast by their base class, interfaces use the
 * bitmap test, everything else recurses on the element cast class */
1666 if (klass->cast_class == mono_defaults.object_class) {
1667 int parent_reg = alloc_preg (cfg);
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1669 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1670 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1671 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1672 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1673 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1674 } else if (klass->cast_class == mono_defaults.enum_class) {
1675 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1676 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1677 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1679 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1682 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1683 /* Check that the object is a vector too */
1684 int bounds_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1687 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array case: exact match of supertypes [idepth - 1] against klass */
1690 int idepth_reg = alloc_preg (cfg);
1691 int stypes_reg = alloc_preg (cfg);
1692 int stype = alloc_preg (cfg);
1694 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1697 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1701 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 *   Emit inline code that sets SIZE bytes at DESTREG+OFFSET to VAL with the
 * given alignment. Only VAL == 0 is supported (asserted below). Small
 * aligned sizes use immediate stores; larger sizes store from a register in
 * a widest-first sequence, falling back to byte stores for unaligned data.
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* only zeroing is supported */
1710 g_assert (val == 0);
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_VOID_P == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* larger sizes: materialize the value in a register once, then store it */
1731 val_reg = alloc_preg (cfg);
1733 if (sizeof (gpointer) == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned destination: fall back to byte stores */
1739 /* This could be optimized further if necessary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (sizeof (gpointer) == 8) {
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* then 4-, 2- and 1-byte stores for the remaining tail */
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy2:
 *
 *   Emit inline code that copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET with the given alignment: widest loads/stores first
 * (8/4/2 bytes where alignment and platform allow), byte copies for the
 * unaligned case and the tail.
 */
1781 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* unaligned copy: plain byte-by-byte load/store pairs */
1786 /* This could be optimized further if necessary */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1797 #if !NO_UNALIGNED_ACCESS
1798 if (sizeof (gpointer) == 8) {
/* 8-byte chunks on 64-bit targets */
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks */
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte chunks */
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining bytes */
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ is an array of exactly ARRAY_CLASS, raising
 * ArrayTypeMismatchException otherwise. With MONO_OPT_SHARED the check
 * compares MonoClass pointers (vtables are per-domain); otherwise it
 * compares the vtable pointer directly.
 */
1837 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1839 int vtable_reg = alloc_preg (cfg);
1841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1843 if (cfg->opt & MONO_OPT_SHARED) {
1844 int class_reg = alloc_preg (cfg);
1845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1846 if (cfg->compile_aot) {
1847 int klass_reg = alloc_preg (cfg);
1848 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1849 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1854 if (cfg->compile_aot) {
1855 int vt_reg = alloc_preg (cfg);
1856 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1857 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method return TYPE to the call opcode variant to use: plain/REG
 * (calli) / VIRT forms of OP_CALL, OP_VOIDCALL, OP_LCALL, OP_FCALL or
 * OP_VCALL. Enum valuetypes and generic instances are unwrapped and the
 * switch is re-entered with the underlying type.
 */
1867 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, handled like a plain call */
1870 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 type = mini_get_basic_type_from_generic (gsctx, type);
1874 switch (type->type) {
1875 case MONO_TYPE_VOID:
1876 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1879 case MONO_TYPE_BOOLEAN:
1882 case MONO_TYPE_CHAR:
1885 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1889 case MONO_TYPE_FNPTR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1902 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1903 case MONO_TYPE_VALUETYPE:
/* enums use their underlying integral type */
1904 if (type->data.klass->enumtype) {
1905 type = type->data.klass->enum_basetype;
1908 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1909 case MONO_TYPE_TYPEDBYREF:
1910 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1911 case MONO_TYPE_GENERICINST:
1912 type = &type->data.generic_class->container_class->byval_arg;
1915 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1921 * target_type_is_incompatible:
1922 * @cfg: MonoCompile context
1924 * Check that the item @arg on the evaluation stack can be stored
1925 * in the target type (can be a local, or field, etc).
1926 * The cfg arg can be used to check if we need verification or just
1929 * Returns: non-0 value if arg can't be stored on a target.
1932 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1934 MonoType *simple_type;
1937 if (target->byref) {
1938 /* FIXME: check that the pointed to types match */
1939 if (arg->type == STACK_MP)
1940 return arg->klass != mono_class_from_mono_type (target);
1941 if (arg->type == STACK_PTR)
/* strip enum wrappers etc. before dispatching on the element type */
1946 simple_type = mono_type_get_underlying_type (target);
1947 switch (simple_type->type) {
1948 case MONO_TYPE_VOID:
1952 case MONO_TYPE_BOOLEAN:
1955 case MONO_TYPE_CHAR:
/* small integer targets accept I4 and native-int stack values */
1958 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1962 /* STACK_MP is needed when setting pinned locals */
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1968 case MONO_TYPE_FNPTR:
1969 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 if (arg->type != STACK_OBJ)
1979 /* FIXME: check type compatibility */
1983 if (arg->type != STACK_I8)
1988 if (arg->type != STACK_R8)
1991 case MONO_TYPE_VALUETYPE:
/* valuetypes must match by exact class, not just stack type */
1992 if (arg->type != STACK_VTYPE)
1994 klass = mono_class_from_mono_type (simple_type);
1995 if (klass != arg->klass)
1998 case MONO_TYPE_TYPEDBYREF:
1999 if (arg->type != STACK_VTYPE)
2001 klass = mono_class_from_mono_type (simple_type);
2002 if (klass != arg->klass)
2005 case MONO_TYPE_GENERICINST:
2006 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2007 if (arg->type != STACK_VTYPE)
2009 klass = mono_class_from_mono_type (simple_type);
2010 if (klass != arg->klass)
2014 if (arg->type != STACK_OBJ)
2016 /* FIXME: check type compatibility */
2020 case MONO_TYPE_MVAR:
2021 /* FIXME: all the arguments must be references for now,
2022 * later look inside cfg and see if the arg num is
2023 * really a reference
2025 g_assert (cfg->generic_sharing_context);
2026 if (arg->type != STACK_OBJ)
2030 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2036 * Prepare arguments for passing to a function call.
2037 * Return a non-zero value if the arguments can't be passed to the given
2039 * The type checks are not yet complete and some conversions may need
2040 * casts on 32 or 64 bit architectures.
2042 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *
 *   Return non-zero if the evaluation-stack values in ARGS cannot be passed
 * to a call with signature SIG (stack-type mismatch per parameter).
 */
2045 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2047 MonoType *simple_type;
/* 'this' must be an object, managed pointer or native pointer */
2051 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2055 for (i = 0; i < sig->param_count; ++i) {
2056 if (sig->params [i]->byref) {
2057 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2061 simple_type = sig->params [i];
2062 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2064 switch (simple_type->type) {
2065 case MONO_TYPE_VOID:
2070 case MONO_TYPE_BOOLEAN:
2073 case MONO_TYPE_CHAR:
2076 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2082 case MONO_TYPE_FNPTR:
2083 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2086 case MONO_TYPE_CLASS:
2087 case MONO_TYPE_STRING:
2088 case MONO_TYPE_OBJECT:
2089 case MONO_TYPE_SZARRAY:
2090 case MONO_TYPE_ARRAY:
2091 if (args [i]->type != STACK_OBJ)
2096 if (args [i]->type != STACK_I8)
2101 if (args [i]->type != STACK_R8)
2104 case MONO_TYPE_VALUETYPE:
/* enums check against their underlying type */
2105 if (simple_type->data.klass->enumtype) {
2106 simple_type = simple_type->data.klass->enum_basetype;
2109 if (args [i]->type != STACK_VTYPE)
2112 case MONO_TYPE_TYPEDBYREF:
2113 if (args [i]->type != STACK_VTYPE)
2116 case MONO_TYPE_GENERICINST:
2117 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2121 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart. */
2129 callvirt_to_call (int opcode)
2134 case OP_VOIDCALLVIRT:
/* unmapped opcodes are a bug in the caller */
2143 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its *CALL_MEMBASE counterpart (call through
 * an address loaded from [basereg + offset], e.g. a vtable slot). */
2150 callvirt_to_call_membase (int opcode)
2154 return OP_CALL_MEMBASE;
2155 case OP_VOIDCALLVIRT:
2156 return OP_VOIDCALL_MEMBASE;
2158 return OP_FCALL_MEMBASE;
2160 return OP_LCALL_MEMBASE;
2162 return OP_VCALL_MEMBASE;
2164 g_assert_not_reached ();
2170 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Pass the IMT identifier for CALL: either IMT_ARG's value, or the called
 * method itself (AOT constant or OP_PCONST), placed in the architecture's
 * dedicated IMT register when one exists; otherwise delegate to the
 * arch-specific hook.
 */
2172 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2174 #ifdef MONO_ARCH_IMT_REG
2175 int method_reg = alloc_preg (cfg);
2178 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2179 } else if (cfg->compile_aot) {
2180 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2183 MONO_INST_NEW (cfg, ins, OP_PCONST);
2184 ins->inst_p0 = call->method;
2185 ins->dreg = method_reg;
2186 MONO_ADD_INS (cfg->cbb, ins);
2189 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2191 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 * pointing at TARGET. */
2196 static MonoJumpInfo *
2197 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2199 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2203 ji->data.target = target;
2208 inline static MonoInst*
2209 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects the indirect-call opcodes, VIRTUAL the callvirt ones.
 * Valuetype returns get a temp + OP_OUTARG_VTRETADDR; on soft-float targets
 * R4 arguments are pre-converted via an icall. The instruction is NOT added
 * to a bblock here -- callers do that.
 */
2211 inline static MonoCallInst *
2212 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2213 MonoInst **args, int calli, int virtual)
2216 #ifdef MONO_ARCH_SOFT_FLOAT
2220 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2223 call->signature = sig;
2225 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2227 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* valuetype return: allocate a temp to receive the value */
2228 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2231 temp->backend.is_pinvoke = sig->pinvoke;
2234 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2235 * address of return value to increase optimization opportunities.
2236 * Before vtype decomposition, the dreg of the call ins itself represents the
2237 * fact the call modifies the return value. After decomposition, the call will
2238 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2239 * will be transformed into an LDADDR.
2241 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2242 loada->dreg = alloc_preg (cfg);
2243 loada->inst_p0 = temp;
2244 /* We reference the call too since call->dreg could change during optimization */
2245 loada->inst_p1 = call;
2246 MONO_ADD_INS (cfg->cbb, loada);
2248 call->inst.dreg = temp->dreg;
2250 call->vret_var = loada;
2251 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2252 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2254 #ifdef MONO_ARCH_SOFT_FLOAT
2256 * If the call has a float argument, we would need to do an r8->r4 conversion using
2257 * an icall, but that cannot be done during the call sequence since it would clobber
2258 * the call registers + the stack. So we do it before emitting the call.
2260 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2262 MonoInst *in = call->args [i];
2264 if (i >= sig->hasthis)
2265 t = sig->params [i - sig->hasthis];
2267 t = &mono_defaults.int_class->byval_arg;
2268 t = mono_type_get_underlying_type (t);
2270 if (!t->byref && t->type == MONO_TYPE_R4) {
2271 MonoInst *iargs [1];
2275 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2277 /* The result will be in an int vreg */
2278 call->args [i] = conv;
/* let the backend lower the argument passing */
2283 mono_arch_emit_call (cfg, call);
2285 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2286 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in ADDR with signature SIG. */
2291 inline static MonoInst*
2292 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2294 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2296 call->inst.sreg1 = addr->dreg;
2298 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2300 return (MonoInst*)call;
/* Like mono_emit_calli (), but additionally passes RGCTX_ARG in the
 * architecture's dedicated rgctx register. Only valid on targets that
 * define MONO_ARCH_RGCTX_REG. */
2303 inline static MonoInst*
2304 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2306 #ifdef MONO_ARCH_RGCTX_REG
2308 int rgctx_reg = mono_alloc_preg (cfg);
2310 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2311 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2312 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2313 cfg->uses_rgctx_reg = TRUE;
2314 return (MonoInst*)call;
2316 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. THIS, when
 * non-NULL, makes the call virtual; IMT_ARG, when non-NULL, supplies the IMT
 * identifier for interface calls. Devirtualizes where possible (non-virtual,
 * final or sealed targets), inserts remoting wrappers for marshalbyref
 * receivers, has a fast path for delegate Invoke, and otherwise dispatches
 * through the vtable/IMT. Returns the call instruction.
 */
2322 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2323 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2325 gboolean virtual = this != NULL;
2326 gboolean enable_for_aot = TRUE;
2329 if (method->string_ctor) {
2330 /* Create the real signature */
2331 /* FIXME: Cache these */
/* string ctors actually return the string, not void */
2332 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2333 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2338 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* transparent-proxy receivers need the remoting-check wrapper */
2340 if (this && sig->hasthis &&
2341 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2342 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2343 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2345 call->method = method;
2347 call->inst.flags |= MONO_INST_HAS_METHOD;
2348 call->inst.inst_left = this;
2351 int vtable_reg, slot_reg, this_reg;
2353 this_reg = this->dreg;
2355 if ((!cfg->compile_aot || enable_for_aot) &&
2356 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2357 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2358 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2360 * the method is not virtual, we just need to ensure this is not null
2361 * and then we can call the method directly.
2363 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2364 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2367 if (!method->string_ctor) {
2368 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2369 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2370 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2373 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2375 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2377 return (MonoInst*)call;
2380 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2381 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2382 /* Make a call to delegate->invoke_impl */
2383 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2384 call->inst.inst_basereg = this_reg;
2385 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2386 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2388 return (MonoInst*)call;
2392 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2393 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2394 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2396 * the method is virtual, but we can statically dispatch since either
2397 * it's class or the method itself are sealed.
2398 * But first we need to ensure it's not a null reference.
2400 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2401 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2402 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2404 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2405 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2407 return (MonoInst*)call;
/*真 virtual dispatch: call through [slot_reg + inst_offset] */
2410 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2412 /* Initialize method->slot */
2413 mono_class_setup_vtable (method->klass);
2415 vtable_reg = alloc_preg (cfg);
2416 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2417 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2419 #ifdef MONO_ARCH_HAVE_IMT
/* interface call through the IMT: slots live before the vtable */
2421 guint32 imt_slot = mono_method_get_imt_slot (method);
2422 emit_imt_argument (cfg, call, imt_arg);
2423 slot_reg = vtable_reg;
2424 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2427 if (slot_reg == -1) {
/* no IMT: resolve the interface-offsets table entry instead */
2428 slot_reg = alloc_preg (cfg);
2429 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2430 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
/* plain virtual call through the vtable slot */
2433 slot_reg = vtable_reg;
2434 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2437 call->inst.sreg1 = slot_reg;
2438 call->virtual = TRUE;
2441 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2443 return (MonoInst*)call;
2446 static inline MonoInst*
2447 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2449 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * The call instruction is appended to the current bblock and returned so
 * the caller can consume its destination register.
 */
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
	/* Lower the arguments and build the call instruction itself */
	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
	return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its raw address FUNC,
 * dispatching through the icall's wrapper and registered signature.
 */
inline static MonoInst*
mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
	/* Map the raw address back to the registered icall metadata */
	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2479 * mono_emit_abs_call:
2481 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
	/* Describe the target as a patch info so it can be resolved at patch time */
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	/* Register the patch info in abs_patches, keyed by itself */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	/* Tell the backend the "address" is a patch info, not a callable pointer */
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * get_memcpy_method:
 *
 *   Return the 3-argument "memcpy" helper method looked up on corlib's
 * String class, caching the result in a static after the first lookup.
 */
get_memcpy_method (void)
	static MonoMethod *memcpy_method = NULL;
	if (!memcpy_method) {
		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
			/* The helper must exist in every supported corlib version */
			g_error ("Old corlib found. Install a new one");
	return memcpy_method;
2515 * Emit code to copy a valuetype of type @klass whose address is stored in
2516 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code which copies a valuetype of type KLASS from the address in
 * SRC->dreg to the address in DEST->dreg.  Small copies are inlined as a
 * memcpy IR sequence; larger ones call the managed memcpy helper.
 */
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
	MonoInst *iargs [3];
	MonoMethod *memcpy_method;
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
		/* native layout size when marshalling, managed value size otherwise */
		n = mono_class_native_size (klass, &align);
		n = mono_class_value_size (klass, &align);
	/* Small copies: emit an inline memcpy instead of a call */
	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	/* Fall back to calling the managed memcpy (dest, src, size) helper */
	EMIT_NEW_ICONST (cfg, iargs [2], n);
	memcpy_method = get_memcpy_method ();
	mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the 3-argument "memset" helper method looked up on corlib's
 * String class, caching the result in a static after the first lookup.
 */
get_memset_method (void)
	static MonoMethod *memset_method = NULL;
	if (!memset_method) {
		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
			/* The helper must exist in every supported corlib version */
			g_error ("Old corlib found. Install a new one");
	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code which zero-initializes the valuetype KLASS at the address in
 * DEST->dreg.  Small types are cleared with an inline memset IR sequence;
 * larger ones call the managed memset helper with a zero fill byte.
 */
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
	MonoInst *iargs [3];
	MonoMethod *memset_method;
	/* FIXME: Optimize this for the case when dest is an LDADDR */
	mono_class_init (klass);
	n = mono_class_value_size (klass, &align);
	/* Small types: clear inline instead of calling out */
	if (n <= sizeof (gpointer) * 5) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
	/* memset (dest, 0, n) through the managed helper */
	memset_method = get_memset_method ();
	EMIT_NEW_ICONST (cfg, iargs [1], 0);
	EMIT_NEW_ICONST (cfg, iargs [2], n);
	mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR which loads the runtime generic context for METHOD.  Depending
 * on CONTEXT_USED and the method's flags, the context comes from the hidden
 * mrgctx argument, the vtable variable, or the `this' object's vtable.
 */
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
	MonoInst *this = NULL;
	g_assert (!method->klass->valuetype);
	/* Non-static methods without a method context: the context comes from `this' (arg 0) */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
		EMIT_NEW_ARGLOAD (cfg, this, 0);
	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		/* Method context used: load the mrgctx from its compiler-managed temp */
		MonoInst *mrgctx_loc, *mrgctx_var;
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
		/* Static methods: load the vtable from its compiler-managed temp */
		MonoInst *vtable_loc, *vtable_var;
		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* The temp holds an mrgctx; indirect through it to get the class vtable */
			MonoInst *mrgctx_var = vtable_var;
			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		/* Instance method: fetch the vtable out of the `this' object */
		int vtable_reg, res_reg;
		vtable_reg = alloc_preg (cfg);
		res_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * Load the runtime generic context for the current method into (rgctx),
 * bailing out of generic sharing first if the method is on a valuetype.
 */
#define EMIT_GET_RGCTX(rgctx, context_used) do { \
		GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
		(rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool MP) and fill a MonoJumpInfoRgctxEntry describing
 * a lazily resolved rgctx slot: the requesting METHOD, whether the lookup
 * goes through an mrgctx, the patch describing the data, and the info type.
 */
static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
	res->method = method;
	res->in_mrgctx = in_mrgctx;
	/* Wrap the payload in a nested MonoJumpInfo so patching machinery can use it */
	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
	res->data->type = patch_type;
	res->data->data.target = patch_data;
	res->info_type = info_type;
2655 static inline MonoInst*
2656 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2658 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2662 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2663 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2665 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2667 return emit_rgctx_fetch (cfg, rgctx, entry);
2671 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2672 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2674 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2676 return emit_rgctx_fetch (cfg, rgctx, entry);
2680 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2681 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2683 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2685 return emit_rgctx_fetch (cfg, rgctx, entry);
2689 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to the Nullable<T>.Unbox helper for KLASS.  In shared
 * generic code the helper's address is fetched from the rgctx and called
 * indirectly; otherwise a direct managed call is emitted.
 */
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
		/* Shared code: resolve the concrete Unbox method's code via the rgctx */
		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
												MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for KLASS on the object in sp [0]: check that
 * the object is a non-array instance whose element class matches KLASS,
 * throwing InvalidCastException otherwise, and produce the address of the
 * boxed payload (object pointer + sizeof (MonoObject)).
 */
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);
	obj_reg = sp [0]->dreg;
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
	/* FIXME: generics */
	g_assert (klass->rank == 0);
	/* An array object can never unbox to a valuetype: rank must be 0 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
		/* Shared generic code: compare against the element class from the rgctx */
		MonoInst *element_class;
		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);
		element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
											  klass->element_class, MONO_RGCTX_INFO_KLASS);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		/* Non-shared code: the element class is known at compile time */
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
	/* Result: address of the valuetype payload inside the boxed object */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit code allocating an object of type KLASS, choosing between the
 * shared-domain allocator, an mscorlib-specialized AOT helper, a managed
 * GC allocator, or the generic allocation function.  FOR_BOX selects the
 * boxing variant of the allocator where applicable.
 */
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
	MonoInst *iargs [2];
	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared code: allocate through mono_object_new (domain, klass) */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
		if (managed_alloc) {
			/* GC provides a managed allocator method; call it with the vtable */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
			/* Allocator wants the instance size in pointer-sized words */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the vtable is not known at compile time: it
 * is supplied at runtime by DATA_INST (e.g. fetched from an rgctx slot),
 * so allocation goes through mono_object_new{,_specific} ().
 */
handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
	MonoInst *iargs [2];
	MonoMethod *managed_alloc = NULL;
	FIXME: we cannot get managed_alloc here because we can't get
	the class's vtable (because it's not a closed class)
	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared code: mono_object_new (domain, runtime-supplied klass) */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		iargs [1] = data_inst;
		alloc_ftn = mono_object_new;
		if (managed_alloc) {
			/* NOTE: dead with managed_alloc forced to NULL above; see FIXME */
			iargs [0] = data_inst;
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		/* data_inst is expected to carry the vtable for mono_object_new_specific */
		iargs [0] = data_inst;
		alloc_ftn = mono_object_new_specific;
	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit code boxing the valuetype VAL of type KLASS: nullable types are
 * boxed through the managed Nullable<T>.Box helper; everything else
 * allocates a boxed object and stores VAL into its payload.
 */
handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
	MonoInst *alloc, *ins;
	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
		return mono_emit_method_call (cfg, method, &val, NULL);
	alloc = handle_alloc (cfg, klass, TRUE);
	/* Copy the value into the freshly allocated object, after the header */
	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the Nullable<T>.Box
 * helper is resolved through the rgctx and the allocation vtable comes
 * from the runtime-supplied DATA_INST.
 */
handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *rgctx, MonoInst *data_inst)
	MonoInst *alloc, *ins;
	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
		/* Resolve the concrete Box method's code through the rgctx and call indirectly */
		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
												MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
	/* Copy the value into the freshly allocated object, after the header */
	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the castclass sequence: null references pass through; otherwise
 * the object's type is checked against KLASS (interface or class path) and
 * InvalidCastException is thrown on mismatch.  With --debug=casts the
 * from/to classes are recorded in TLS for better exception messages.
 */
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, is_null_bb);
	/* null always casts successfully */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	if (mini_get_debug_options ()->better_cast_details) {
		int to_klass_reg = alloc_preg (cfg);
		int klass_reg = alloc_preg (cfg);
		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
			/* NOTE(review): stray '.' after the newline in this message — should end with "\n" only */
			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
		MONO_ADD_INS (cfg->cbb, tls_get);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Record the source and destination classes of the cast in TLS */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		/* Interface target: check the interface bitmap/offsets in the vtable */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
		int klass_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed non-array class: a single equality check suffices */
			/* the remoting code is broken, access the class for now */
			MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
			/* General case: walk the class hierarchy */
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
	MONO_START_BB (cfg, is_null_bb);
	/* Reset the variables holding the cast details */
	if (mini_get_debug_options ()->better_cast_details) {
		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
		MONO_ADD_INS (cfg->cbb, tls_get);
		/* It is enough to reset the from field */
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 *
 *   Emit the isinst sequence: the result register holds the object itself
 * when it is an instance of KLASS (the is_null_bb path), or 0 otherwise
 * (the false_bb path).  A null input also flows to is_null_bb, yielding 0
 * via the register copy of the null object pointer.
 */
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		/* Interface target: decide via the vtable's interface data */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
		int klass_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
			/* Array target: rank must match, then compare element classes */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				/* object[] accepts any reference element type (incl. boxed enums) */
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
		} else if (mono_class_is_nullable (klass)) {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				/* Sealed class: a single equality check decides */
				/* the remoting code is broken, access the class for now */
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
				/* General case: walk the class hierarchy */
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
	MONO_START_BB (cfg, false_bb);
	MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	MONO_START_BB (cfg, is_null_bb);
	/* Success (or null input): result is the object reference itself */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst used for transparent proxies (see the
 * 0/1/2 result encoding in the comment below).
 */
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int klass_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);
	/* null is not an instance: result 1 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Interface check failed: only a proxy may still qualify */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Non-proxy object: do a plain class check */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
		/* Proxy with custom type info: may be undecidable, result 2 on failure */
		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
		MONO_START_BB (cfg, no_proxy_bb);
		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	MONO_START_BB (cfg, false_bb);
	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	MONO_START_BB (cfg, false2_bb);
	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	MONO_START_BB (cfg, true_bb);
	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	MONO_START_BB (cfg, end_bb);
	/* Materialize the result as an I4 stack value */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware castclass used for transparent proxies (see
 * the 0/1/throw result encoding in the comment below).
 */
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);
	/* null always casts successfully: result 0 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Interface check failed: anything but a proxy throws */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
		/* Proxy whose type cannot be determined: result 1 */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
		NEW_BBLOCK (cfg, no_proxy_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
		NEW_BBLOCK (cfg, fail_1_bb);
		/* Proxy with custom type info: undecidable mismatch yields result 1 */
		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
		MONO_START_BB (cfg, fail_1_bb);
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
		MONO_START_BB (cfg, no_proxy_bb);
		/* Plain object: ordinary castclass semantics (throws on mismatch) */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	MONO_START_BB (cfg, ok_result_bb);
	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	MONO_START_BB (cfg, end_bb);
	/* Materialize the result as an I4 stack value */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined delegate construction for delegate class KLASS bound to
 * METHOD with receiver TARGET: allocate the delegate object and fill its
 * target, method, method_code and invoke_impl fields directly, instead of
 * calling mono_delegate_ctor () at runtime.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	obj = handle_alloc (cfg, klass, FALSE);
	/* Inline the contents of mono_delegate_ctor */
	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
	/* Set method field */
	EMIT_NEW_METHODCONST (cfg, method_ins, method);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;
		/* Look up (or create) the per-domain code slot under the domain lock */
		domain = mono_domain_get ();
		mono_domain_lock (domain);
		if (!domain->method_code_hash)
			domain->method_code_hash = g_hash_table_new (NULL, NULL);
		code_slot = g_hash_table_lookup (domain->method_code_hash, method);
			code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
			g_hash_table_insert (domain->method_code_hash, method, code_slot);
		mono_domain_unlock (domain);
		EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	/* Set invoke_impl field */
	trampoline = mono_create_delegate_trampoline (klass);
	EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK through
 * the varargs array-new icall, marking the method as using varargs.
 */
handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
	MonoJitICallInfo *info;
	/* Need to register the icall so it gets an icall wrapper */
	info = mono_get_array_new_va_icall (rank);
	cfg->flags |= MONO_CFG_HAS_VARARGS;
	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry bblock, plus a dummy use
 * in the exit bblock to keep the variable alive.  No-op when there is no
 * got_var or it is already allocated.
 */
mono_emit_load_got_addr (MonoCompile *cfg)
	MonoInst *getaddr, *dummy_use;
	if (!cfg->got_var || cfg->got_var_allocated)
	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->dreg = cfg->got_var->dreg;
	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		/* Prepend by hand: must execute before any other instruction */
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
		MONO_ADD_INS (cfg->bb_entry, getaddr);
	cfg->got_var_allocated = TRUE;
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects runtime/internal/pinvoke/synchronized/noinline methods, methods
 * with exception clauses or marshalbyref classes, bodies over the size
 * limit (INLINE_LENGTH_LIMIT, overridable via MONO_INLINELIMIT), classes
 * whose cctor cannot be safely run/deferred, methods with declarative
 * security, and (under SOFT_FLOAT) methods with R4 parameters or return.
 */
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
	MonoMethodHeader *header = mono_method_get_header (method);
#ifdef MONO_ARCH_SOFT_FLOAT
	MonoMethodSignature *sig = mono_method_signature (method);
	/* No inlining under generic sharing */
	if (cfg->generic_sharing_context)
#ifdef MONO_ARCH_HAVE_LMF_OPS
	/*
	 * NOTE(review): this branch references `signature` while the SOFT_FLOAT
	 * block declares `sig` — confirm both identifiers exist in the full source.
	 */
	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
	     (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
	    !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
	    (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
	    (method->klass->marshalbyref) ||
	    !header || header->num_clauses)
	/* also consider num_locals? */
	/* Do the size check early to avoid creating vtables */
	if (getenv ("MONO_INLINELIMIT")) {
		if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
	} else if (header->code_size >= INLINE_LENGTH_LIMIT)
	 * if we can initialize the class of the method right away, we do,
	 * otherwise we don't allow inlining if the class needs initialization,
	 * since it would mean inserting a call to mono_runtime_class_init()
	 * inside the inlined code
	if (!(cfg->opt & MONO_OPT_SHARED)) {
		if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
			if (cfg->run_cctors && method->klass->has_cctor) {
				if (!method->klass->runtime_info)
					/* No vtable created yet */
				vtable = mono_class_vtable (cfg->domain, method->klass);
				/* This makes so that inline cannot trigger */
				/* .cctors: too many apps depend on them */
				/* running with a specific order... */
				if (! vtable->initialized)
				mono_runtime_class_init (vtable);
		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
			if (!method->klass->runtime_info)
				/* No vtable created yet */
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!vtable->initialized)
		 * If we're compiling for shared code
		 * the cctor will need to be run at aot method load time, for example,
		 * or at the end of the compilation of the inlining method.
		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
	 * CAS - do not inline methods with declarative security
	 * Note: this has to be before any possible return TRUE;
	if (mono_method_has_declsec (method))
#ifdef MONO_ARCH_SOFT_FLOAT
	/* Soft-float: R4 values need special handling, so refuse to inline */
	if (sig->ret && sig->ret->type == MONO_TYPE_R4)
	for (i = 0; i < sig->param_count; ++i)
		if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static-field access must be preceded by running the
 *   declaring class's .cctor.  NOTE(review): this listing is elided — the
 *   return statements between the visible conditions are missing here.
 */
3405 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Vtable already initialized and we are JITting (not AOT): nothing to run. */
3407 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes allow lazy cctor execution — presumably no eager run needed; confirm against the elided return. */
3410 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3413 if (!mono_class_needs_cctor_run (vtable->klass, method))
/* Instance method of the same class: the object exists, so the cctor ran already. */
3416 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3417 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element `index` of the one-dimensional
 *   array `arr` of element class `klass`, including a bounds check.
 *   Returns the address-producing instruction.  (Listing is elided: some
 *   braces/returns between visible lines are missing from this view.)
 */
3424 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3428 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3430 mono_class_init (klass);
3431 size = mono_class_array_element_size (klass);
3433 mult_reg = alloc_preg (cfg);
3434 array_reg = arr->dreg;
3435 index_reg = index->dreg;
3437 #if SIZEOF_VOID_P == 8
3438 /* The array reg is 64 bits but the index reg is only 32 */
3439 index2_reg = alloc_preg (cfg);
3440 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3442 index2_reg = index_reg;
3445 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path for x86/amd64: fold the scale and vector offset into one LEA. */
3447 #if defined(__i386__) || defined(__x86_64__)
3448 if (size == 1 || size == 2 || size == 4 || size == 8) {
3449 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3451 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3452 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector). */
3458 add_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3461 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3462 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3463 ins->type = STACK_PTR;
3464 MONO_ADD_INS (cfg->cbb, ins);
3469 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [index1, index2] of a
 *   two-dimensional array, with per-dimension lower-bound adjustment and
 *   range checks against the MonoArrayBounds records.  Only compiled on
 *   architectures with native multiply (guarded by the #ifndef above).
 */
3471 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3473 int bounds_reg = alloc_preg (cfg);
3474 int add_reg = alloc_preg (cfg);
3475 int mult_reg = alloc_preg (cfg);
3476 int mult2_reg = alloc_preg (cfg);
3477 int low1_reg = alloc_preg (cfg);
3478 int low2_reg = alloc_preg (cfg);
3479 int high1_reg = alloc_preg (cfg);
3480 int high2_reg = alloc_preg (cfg);
3481 int realidx1_reg = alloc_preg (cfg);
3482 int realidx2_reg = alloc_preg (cfg);
3483 int sum_reg = alloc_preg (cfg);
3488 mono_class_init (klass);
3489 size = mono_class_array_element_size (klass);
3491 index1 = index_ins1->dreg;
3492 index2 = index_ins2->dreg;
3494 /* range checking */
3495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3496 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* realidx1 = index1 - lower_bound[0]; throw if >= length[0] (unsigned compare). */
3498 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3499 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3500 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3502 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3504 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Same for the second dimension; bounds[1] lives sizeof(MonoArrayBounds) further. */
3506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3507 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3508 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3509 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3510 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3511 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3512 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length[1] + realidx2) * size + offsetof(MonoArray, vector). */
3514 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3515 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3517 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3518 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3520 ins->type = STACK_MP;
3522 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation for Array.Address/Get/Set helpers:
 *   rank 1 and (optionally) rank 2 are inlined; higher ranks call a marshal
 *   wrapper.  For setters the trailing value argument is not an index, hence
 *   the `- (is_set? 1: 0)` rank adjustment.
 */
3529 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3533 MonoMethod *addr_method;
3536 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3539 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3541 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3542 /* emit_ldelema_2 depends on OP_LMUL */
3543 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3544 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper for this rank/size. */
3548 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3549 addr_method = mono_marshal_get_array_address (rank, element_size);
3550 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *   Replace calls to well-known corlib methods with inline IR (intrinsics).
 *   Returns the result instruction, or NULL when no intrinsic applies.
 *   This section handles System.String accessors.  (Listing elided: some
 *   #else/#endif and return lines are missing from this view.)
 */
3556 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3558 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3560 static MonoClass *runtime_helpers_class = NULL;
3561 if (! runtime_helpers_class)
3562 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3563 "System.Runtime.CompilerServices", "RuntimeHelpers");
3565 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds-check then load a 16-bit char from the chars array. */
3566 if (strcmp (cmethod->name, "get_Chars") == 0) {
3567 int dreg = alloc_ireg (cfg);
3568 int index_reg = alloc_preg (cfg);
3569 int mult_reg = alloc_preg (cfg);
3570 int add_reg = alloc_preg (cfg);
3572 #if SIZEOF_VOID_P == 8
3573 /* The array reg is 64 bits but the index reg is only 32 */
3574 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3576 index_reg = args [1]->dreg;
3578 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3580 #if defined(__i386__) || defined(__x86_64__)
3581 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3582 add_reg = ins->dreg;
3583 /* Avoid a warning */
3585 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Non-x86 path: scale index by 2 (shl 1) and add the string base manually. */
3588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3589 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3590 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3591 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3593 type_from_op (ins, NULL, NULL);
/* String.get_Length: direct 32-bit load of the length field. */
3595 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3596 int dreg = alloc_ireg (cfg);
3597 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3598 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3599 type_from_op (ins, NULL, NULL);
/* String.InternalSetChar: store a char at index without re-checking bounds. */
3602 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3603 int mult_reg = alloc_preg (cfg);
3604 int add_reg = alloc_preg (cfg);
3606 /* The corlib functions check for oob already. */
3607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3608 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3609 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* System.Object intrinsics: GetType, InternalGetHashCode, and the empty .ctor. */
3612 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: obj->vtable->type, two dependent loads. */
3614 if (strcmp (cmethod->name, "GetType") == 0) {
3615 int dreg = alloc_preg (cfg);
3616 int vt_reg = alloc_preg (cfg);
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3619 type_from_op (ins, NULL, NULL);
/* Hash from the object address — only valid with a non-moving GC (see guard). */
3622 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3623 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3624 int dreg = alloc_ireg (cfg);
3625 int t1 = alloc_ireg (cfg);
3627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3628 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3629 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
3633 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3634 MONO_INST_NEW (cfg, ins, OP_NOP);
3635 MONO_ADD_INS (cfg->cbb, ins);
/* System.Array intrinsics: only the get_Rank / get_Length getters. */
3639 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap first-character filter before the strcmp calls below. */
3640 if (cmethod->name [0] != 'g')
/* Array.get_Rank: load the rank byte out of the vtable. */
3643 if (strcmp (cmethod->name, "get_Rank") == 0) {
3644 int dreg = alloc_ireg (cfg);
3645 int vtable_reg = alloc_preg (cfg);
3646 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3647 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3648 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3649 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3650 type_from_op (ins, NULL, NULL);
/* Array.get_Length: load max_length directly from the array header. */
3653 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3654 int dreg = alloc_ireg (cfg);
3656 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3657 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3658 type_from_op (ins, NULL, NULL);
/* RuntimeHelpers.get_OffsetToStringData: a compile-time constant. */
3663 } else if (cmethod->klass == runtime_helpers_class) {
3665 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3666 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* Thread.get_CurrentThread via an arch-specific intrinsic, if available. */
3670 } else if (cmethod->klass == mono_defaults.thread_class) {
3671 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3672 ins->dreg = alloc_preg (cfg);
3673 ins->type = STACK_OBJ;
3674 MONO_ADD_INS (cfg->cbb, ins);
3676 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3677 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3678 MONO_ADD_INS (cfg->cbb, ins);
/* Array.GetGenericValueImpl: load the element and store it through arg 2. */
3681 } else if (mini_class_is_system_array (cmethod->klass) &&
3682 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3683 MonoInst *addr, *store, *load;
3684 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3686 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3687 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3688 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* System.Threading.Interlocked intrinsics (Read / Increment / Decrement). */
3690 } else if (cmethod->klass->image == mono_defaults.corlib &&
3691 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3692 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3695 #if SIZEOF_VOID_P == 8
3696 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3697 /* 64 bit reads are already atomic */
3698 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3699 ins->dreg = mono_alloc_preg (cfg);
3700 ins->inst_basereg = args [0]->dreg;
3701 ins->inst_offset = 0;
3702 MONO_ADD_INS (cfg->cbb, ins);
3706 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment: atomic add of the constant 1. */
3707 if (strcmp (cmethod->name, "Increment") == 0) {
3708 MonoInst *ins_iconst;
3711 if (fsig->params [0]->type == MONO_TYPE_I4)
3712 opcode = OP_ATOMIC_ADD_NEW_I4;
3713 #if SIZEOF_VOID_P == 8
3714 else if (fsig->params [0]->type == MONO_TYPE_I8)
3715 opcode = OP_ATOMIC_ADD_NEW_I8;
3718 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3719 ins_iconst->inst_c0 = 1;
3720 ins_iconst->dreg = mono_alloc_ireg (cfg);
3721 MONO_ADD_INS (cfg->cbb, ins_iconst);
3723 MONO_INST_NEW (cfg, ins, opcode);
3724 ins->dreg = mono_alloc_ireg (cfg);
3725 ins->inst_basereg = args [0]->dreg;
3726 ins->inst_offset = 0;
3727 ins->sreg2 = ins_iconst->dreg;
3728 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3729 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: same shape as Increment but with the constant -1. */
3731 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3732 MonoInst *ins_iconst;
3735 if (fsig->params [0]->type == MONO_TYPE_I4)
3736 opcode = OP_ATOMIC_ADD_NEW_I4;
3737 #if SIZEOF_VOID_P == 8
3738 else if (fsig->params [0]->type == MONO_TYPE_I8)
3739 opcode = OP_ATOMIC_ADD_NEW_I8;
3742 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3743 ins_iconst->inst_c0 = -1;
3744 ins_iconst->dreg = mono_alloc_ireg (cfg);
3745 MONO_ADD_INS (cfg->cbb, ins_iconst);
3747 MONO_INST_NEW (cfg, ins, opcode);
3748 ins->dreg = mono_alloc_ireg (cfg);
3749 ins->inst_basereg = args [0]->dreg;
3750 ins->inst_offset = 0;
3751 ins->sreg2 = ins_iconst->dreg;
3752 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3753 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Add: atomic add of the caller-supplied second argument. */
3755 } else if (strcmp (cmethod->name, "Add") == 0) {
3758 if (fsig->params [0]->type == MONO_TYPE_I4)
3759 opcode = OP_ATOMIC_ADD_NEW_I4;
3760 #if SIZEOF_VOID_P == 8
3761 else if (fsig->params [0]->type == MONO_TYPE_I8)
3762 opcode = OP_ATOMIC_ADD_NEW_I8;
3766 MONO_INST_NEW (cfg, ins, opcode);
3767 ins->dreg = mono_alloc_ireg (cfg);
3768 ins->inst_basereg = args [0]->dreg;
3769 ins->inst_offset = 0;
3770 ins->sreg2 = args [1]->dreg;
/* FIX: compare against OP_ATOMIC_ADD_NEW_I4 — the only I4 opcode this branch
 * assigns (line 3759).  The previous comparison with OP_ATOMIC_ADD_I4 was
 * always false, so Interlocked.Add(ref int, int) got stack type STACK_I8;
 * the Increment/Decrement branches (3728, 3752) show the intended constant. */
3771 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3772 MONO_ADD_INS (cfg->cbb, ins);
3775 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
/* Interlocked.Exchange: supported for I4, and for I8/I/object on 64 bit. */
3777 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3778 if (strcmp (cmethod->name, "Exchange") == 0) {
3781 if (fsig->params [0]->type == MONO_TYPE_I4)
3782 opcode = OP_ATOMIC_EXCHANGE_I4;
3783 #if SIZEOF_VOID_P == 8
3784 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3785 (fsig->params [0]->type == MONO_TYPE_I) ||
3786 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3787 opcode = OP_ATOMIC_EXCHANGE_I8;
3789 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3790 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3791 opcode = OP_ATOMIC_EXCHANGE_I4;
3796 MONO_INST_NEW (cfg, ins, opcode);
3797 ins->dreg = mono_alloc_ireg (cfg);
3798 ins->inst_basereg = args [0]->dreg;
3799 ins->inst_offset = 0;
3800 ins->sreg2 = args [1]->dreg;
3801 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type of the result follows the exchanged parameter type. */
3803 switch (fsig->params [0]->type) {
3805 ins->type = STACK_I4;
3809 ins->type = STACK_I8;
3811 case MONO_TYPE_OBJECT:
3812 ins->type = STACK_OBJ;
3815 g_assert_not_reached ();
3818 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3820 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3822 * Can't implement CompareExchange methods this way since they have
3823 * three arguments. We can implement one of the common cases, where the new
3824 * value is a constant.
3826 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3827 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3828 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3829 ins->dreg = alloc_ireg (cfg);
3830 ins->sreg1 = args [0]->dreg;
3831 ins->sreg2 = args [1]->dreg;
3832 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3833 ins->type = STACK_I4;
3834 MONO_ADD_INS (cfg->cbb, ins);
3836 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3838 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* Misc corlib intrinsics: Debugger.Break and Environment.get_IsRunningOnWindows. */
3842 } else if (cmethod->klass->image == mono_defaults.corlib) {
3843 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3844 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3845 MONO_INST_NEW (cfg, ins, OP_BREAK);
3846 MONO_ADD_INS (cfg->cbb, ins);
3849 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3850 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Platform is known at compile time, so fold to a constant. */
3851 #ifdef PLATFORM_WIN32
3852 EMIT_NEW_ICONST (cfg, ins, 1);
3854 EMIT_NEW_ICONST (cfg, ins, 0);
3858 } else if (cmethod->klass == mono_defaults.math_class) {
3860 * There is general branches code for Min/Max, but it does not work for
3862 * http://everything2.com/?node_id=1051618
/* Fall back to arch-specific intrinsics for anything not handled above. */
3866 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3870 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected internal calls to managed replacements.  Currently
 *   only String.InternalAllocateStr is rerouted to the GC's managed string
 *   allocator, when one is available.
 */
3873 inline static MonoInst*
3874 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3875 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3877 if (method->klass == mono_defaults.string_class) {
3878 /* managed string allocation support */
3879 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3880 MonoInst *iargs [2];
3881 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3882 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc(vtable, length-arg) in place of the icall. */
3885 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3886 iargs [1] = args [0];
3887 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Before inlining, copy the stack values in `sp` into freshly created
 *   local variables registered as cfg->args, so the inlined body can refer
 *   to its arguments through the usual arg accessors.
 */
3894 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3896 MonoInst *store, *temp;
3899 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is `this` when present; its type comes from the stack entry. */
3900 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3903 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3904 * would be different than the MonoInst's used to represent arguments, and
3905 * the ldelema implementation can't deal with that.
3906 * Solution: When ldelema is used on an inline argument, create a var for
3907 * it, emit ldelema on that var, and emit the saving code below in
3908 * inline_method () if needed.
3910 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3911 cfg->args [i] = temp;
3912 /* This uses cfg->args [i] which is set by the preceeding line */
3913 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3914 store->cil_code = sp [0]->cil_code;
/* Debug knobs: restrict inlining by callee/caller name prefix (env vars). */
3919 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3920 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3922 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Allow inlining only when the callee's full name starts with the prefix
 *   in MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  An empty/unset limit allows
 *   everything (the elided branch presumably returns TRUE).
 */
3924 check_inline_called_method_name_limit (MonoMethod *called_method)
/* Cache the env lookup; "" means "no limit configured". */
3927 static char *limit = NULL;
3929 if (limit == NULL) {
3930 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3932 if (limit_string != NULL)
3933 limit = limit_string;
3935 limit = (char *) "";
3938 if (limit [0] != '\0') {
3939 char *called_method_name = mono_method_full_name (called_method, TRUE);
3941 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3942 g_free (called_method_name);
3944 //return (strncmp_result <= 0);
3945 return (strncmp_result == 0);
3952 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of the callee check above, keyed on the CALLER's full name via
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
3954 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3957 static char *limit = NULL;
3959 if (limit == NULL) {
3960 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3961 if (limit_string != NULL) {
3962 limit = limit_string;
3964 limit = (char *) "";
3968 if (limit [0] != '\0') {
3969 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3971 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3972 g_free (caller_method_name);
3974 //return (strncmp_result <= 0);
3975 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline `cmethod` at the current point of `cfg`.  Saves the parts
 *   of the MonoCompile state that mono_method_to_ir2 will clobber, runs the
 *   callee's IL through it into fresh start/end bblocks, and either links
 *   the result into the caller's CFG (on success) or rolls everything back.
 *   NOTE(review): listing is elided — several returns/braces are missing.
 */
3983 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3984 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3986 MonoInst *ins, *rvar = NULL;
3987 MonoMethodHeader *cheader;
3988 MonoBasicBlock *ebblock, *sbblock;
3990 MonoMethod *prev_inlined_method;
/* Saved compiler state, restored after the recursive method_to_ir2 call. */
3991 MonoInst **prev_locals, **prev_args;
3992 MonoType **prev_arg_types;
3993 guint prev_real_offset;
3994 GHashTable *prev_cbb_hash;
3995 MonoBasicBlock **prev_cil_offset_to_bb;
3996 MonoBasicBlock *prev_cbb;
3997 unsigned char* prev_cil_start;
3998 guint32 prev_cil_offset_to_bb_len;
3999 MonoMethod *prev_current_method;
4000 MonoGenericContext *prev_generic_context;
4002 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters (env-var driven debug aids). */
4004 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4005 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4008 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4009 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4013 if (cfg->verbose_level > 2)
4014 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4016 if (!cmethod->inline_info) {
4017 mono_jit_stats.inlineable_methods++;
4018 cmethod->inline_info = 1;
4020 /* allocate space to store the return value */
4021 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4022 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4025 /* allocate local variables */
4026 cheader = mono_method_get_header (cmethod);
4027 prev_locals = cfg->locals;
4028 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4029 for (i = 0; i < cheader->num_locals; ++i)
4030 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4032 /* allocate start and end blocks */
4033 /* This is needed so if the inline is aborted, we can clean up */
4034 NEW_BBLOCK (cfg, sbblock);
4035 sbblock->real_offset = real_offset;
4037 NEW_BBLOCK (cfg, ebblock);
4038 ebblock->block_num = cfg->num_bblocks++;
4039 ebblock->real_offset = real_offset;
4041 prev_args = cfg->args;
4042 prev_arg_types = cfg->arg_types;
4043 prev_inlined_method = cfg->inlined_method;
4044 cfg->inlined_method = cmethod;
4045 cfg->ret_var_set = FALSE;
4046 prev_real_offset = cfg->real_offset;
4047 prev_cbb_hash = cfg->cbb_hash;
4048 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4049 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4050 prev_cil_start = cfg->cil_start;
4051 prev_cbb = cfg->cbb;
4052 prev_current_method = cfg->current_method;
4053 prev_generic_context = cfg->generic_context;
/* Recursively convert the callee's IL; returns a cost or negative on failure. */
4055 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4057 cfg->inlined_method = prev_inlined_method;
4058 cfg->real_offset = prev_real_offset;
4059 cfg->cbb_hash = prev_cbb_hash;
4060 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4061 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4062 cfg->cil_start = prev_cil_start;
4063 cfg->locals = prev_locals;
4064 cfg->args = prev_args;
4065 cfg->arg_types = prev_arg_types;
4066 cfg->current_method = prev_current_method;
4067 cfg->generic_context = prev_generic_context;
/* Accept the inline when cheap enough (cost < 60) or when forced. */
4069 if ((costs >= 0 && costs < 60) || inline_allways) {
4070 if (cfg->verbose_level > 2)
4071 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4073 mono_jit_stats.inlined_methods++;
4075 /* always add some code to avoid block split failures */
4076 MONO_INST_NEW (cfg, ins, OP_NOP);
4077 MONO_ADD_INS (prev_cbb, ins);
4079 prev_cbb->next_bb = sbblock;
4080 link_bblock (cfg, prev_cbb, sbblock);
4083 * Get rid of the begin and end bblocks if possible to aid local
4086 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4088 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4089 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4091 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4092 MonoBasicBlock *prev = ebblock->in_bb [0];
4093 mono_merge_basic_blocks (cfg, prev, ebblock);
4101 * If the inlined method contains only a throw, then the ret var is not
4102 * set, so set it to a dummy value.
4104 if (!cfg->ret_var_set) {
4105 static double r8_0 = 0.0;
4107 switch (rvar->type) {
4109 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4112 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4117 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4120 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4121 ins->type = STACK_R8;
4122 ins->inst_p0 = (void*)&r8_0;
4123 ins->dreg = rvar->dreg;
4124 MONO_ADD_INS (cfg->cbb, ins);
4127 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4130 g_assert_not_reached ();
4134 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Failure path: clear any pending exception and discard the new bblocks. */
4139 if (cfg->verbose_level > 2)
4140 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4141 cfg->exception_type = MONO_EXCEPTION_NONE;
4142 mono_loader_clear_error ();
4144 /* This gets rid of the newly added bblocks */
4145 cfg->cbb = prev_cbb;
4151 * Some of these comments may well be out-of-date.
4152 * Design decisions: we do a single pass over the IL code (and we do bblock
4153 * splitting/merging in the few cases when it's required: a back jump to an IL
4154 * address that was not already seen as bblock starting point).
4155 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4156 * Complex operations are decomposed in simpler ones right away. We need to let the
4157 * arch-specific code peek and poke inside this process somehow (except when the
4158 * optimizations can take advantage of the full semantic info of coarse opcodes).
4159 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4160 * MonoInst->opcode initially is the IL opcode or some simplification of that
4161 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4162 * opcode with value bigger than OP_LAST.
4163 * At this point the IR can be handed over to an interpreter, a dumb code generator
4164 * or to the optimizing code generator that will translate it to SSA form.
4166 * Profiling directed optimizations.
4167 * We may compile by default with few or no optimizations and instrument the code
4168 * or the user may indicate what methods to optimize the most either in a config file
4169 * or through repeated runs where the compiler applies offline the optimizations to
4170 * each method and then decides if it was worth it.
/* IL-verification helper macros used throughout mono_method_to_ir:
 * each one jumps to the UNVERIFIED / load_error labels on violation. */
4173 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4174 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4175 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4176 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4177 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4178 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4179 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4180 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4182 /* offset from br.s -> br like opcodes */
4183 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   TRUE when the IL address `ip` maps to basic block `bb` (or to no block
 *   at all), i.e. no new bblock starts at that offset.
 */
4186 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4188 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4190 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Pre-pass over the IL in [start, end): decode each opcode, create basic
 *   blocks at every branch target and fall-through point (GET_BBLOCK), and
 *   mark throw-only blocks as out-of-line.  (Listing elided: some break/ip
 *   advance lines are missing from this view.)
 */
4194 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4196 unsigned char *ip = start;
4197 unsigned char *target;
4200 MonoBasicBlock *bblock;
4201 const MonoOpcode *opcode;
4204 cli_addr = ip - start;
4205 i = mono_opcode_value ((const guint8 **)&ip, end);
4208 opcode = &mono_opcodes [i];
/* Advance past the operand according to the opcode's argument kind. */
4209 switch (opcode->argument) {
4210 case MonoInlineNone:
4213 case MonoInlineString:
4214 case MonoInlineType:
4215 case MonoInlineField:
4216 case MonoInlineMethod:
4219 case MonoShortInlineR:
4226 case MonoShortInlineVar:
4227 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next opcode. */
4230 case MonoShortInlineBrTarget:
4231 target = start + cli_addr + 2 + (signed char)ip [1];
4232 GET_BBLOCK (cfg, bblock, target);
4235 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte displacement. */
4237 case MonoInlineBrTarget:
4238 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4239 GET_BBLOCK (cfg, bblock, target);
4242 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets, all relative to the instruction end. */
4244 case MonoInlineSwitch: {
4245 guint32 n = read32 (ip + 1);
4248 cli_addr += 5 + 4 * n;
4249 target = start + cli_addr;
4250 GET_BBLOCK (cfg, bblock, target);
4252 for (j = 0; j < n; ++j) {
4253 target = start + cli_addr + (gint32)read32 (ip);
4254 GET_BBLOCK (cfg, bblock, target);
4264 g_assert_not_reached ();
/* Mark the block containing a throw as out-of-line (cold) code. */
4267 if (i == CEE_THROW) {
4268 unsigned char *bb_start = ip - 1;
4270 /* Find the start of the bblock containing the throw */
4272 while ((bb_start >= start) && !bblock) {
4273 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4277 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method token; for wrapper methods the token indexes the
 *   wrapper's own data, otherwise it is resolved against the image with the
 *   given generic context.  Open constructed types are permitted.
 */
4286 static inline MonoMethod *
4287 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4291 if (m->wrapper_type != MONO_WRAPPER_NONE)
4292 return mono_method_get_wrapper_data (m, token);
4294 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing it rejects methods on open constructed types (elided branch
 *   presumably NULLs the result).
 */
4299 static inline MonoMethod *
4300 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4302 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4304 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type token to a MonoClass, honoring wrapper data and the
 *   generic context, then run class initialization metadata setup.
 */
4310 static inline MonoClass*
4311 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4315 if (method->wrapper_type != MONO_WRAPPER_NONE)
4316 klass = mono_method_get_wrapper_data (method, token);
4318 klass = mono_class_get_full (method->klass->image, token, context);
4320 mono_class_init (klass);
4325 * Returns TRUE if the JIT should abort inlining because "callee"
4326 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for caller->callee.  ECMA link demands emit
 *   inline code throwing a SecurityException; other failures are recorded
 *   on the MonoCompile for later reporting.
 */
4329 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only relevant when inlining (cfg->method != caller) into secured callees. */
4333 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4337 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4338 if (result == MONO_JIT_SECURITY_OK)
4341 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4342 /* Generate code to throw a SecurityException before the actual call/link */
4343 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4346 NEW_ICONST (cfg, args [0], 4);
4347 NEW_METHODCONST (cfg, args [1], caller);
4348 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4349 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4350 /* don't hide previous results */
4351 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4352 cfg->exception_data = result;
/*
 * method_access_exception:
 *   Lazily look up and cache SecurityManager.MethodAccessException(2 args),
 *   the managed thrower used by emit_throw_method_access_exception.
 */
4360 method_access_exception (void)
4362 static MonoMethod *method = NULL;
4365 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4366 method = mono_class_get_method_from_name (secman->securitymanager,
4367 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *   Emit a call to the managed MethodAccessException thrower with the
 *   offending caller/callee as arguments.
 */
4374 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4375 MonoBasicBlock *bblock, unsigned char *ip)
4377 MonoMethod *thrower = method_access_exception ();
4380 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4381 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4382 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *   Lazily look up and cache SecurityManager.VerificationException(), the
 *   managed thrower used by emit_throw_verification_exception.
 */
4386 verification_exception (void)
4388 static MonoMethod *method = NULL;
4391 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4392 method = mono_class_get_method_from_name (secman->securitymanager,
4393 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *   Emit a zero-argument call to the managed VerificationException thrower.
 */
4400 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4402 MonoMethod *thrower = verification_exception ();
4404 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: a caller may invoke a callee of equal-or-lower
 *   security level, or when either side is SafeCritical; otherwise emit a
 *   MethodAccessException throw at the call site.
 */
4408 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4409 MonoBasicBlock *bblock, unsigned char *ip)
4411 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4412 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4413 gboolean is_safe = TRUE;
4415 if (!(caller_level >= callee_level ||
4416 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4417 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4422 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *   Test hook: flags the literally-named "unsafeMethod" (used by the
 *   CoreCLR security test suite); the elided branches return the verdict.
 */
4426 method_is_safe (MonoMethod *method)
4429 if (strcmp (method->name, "unsafeMethod") == 0)
4436 * Check that the IL instructions at ip are the array initialization
4437 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match `dup; ldtoken <field>; call RuntimeHelpers.InitializeArray`
 *   following a newarr, and if the element type allows a raw byte copy,
 *   return a pointer to the field's RVA data (or the RVA itself for AOT)
 *   and its size via out_size.  Returns NULL when the pattern or type does
 *   not qualify.  (Listing heavily elided in the type-switch below.)
 */
4440 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4443 * newarr[System.Int32]
4445 * ldtoken field valuetype ...
4446 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand is a Field-table token. */
4448 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4449 guint32 token = read32 (ip + 7);
4450 guint32 field_token = read32 (ip + 2);
4451 guint32 field_index = field_token & 0xffffff;
4453 const char *data_ptr;
4455 MonoMethod *cmethod;
4456 MonoClass *dummy_class;
4457 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4463 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be corlib's RuntimeHelpers.InitializeArray. */
4466 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4468 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4469 case MONO_TYPE_BOOLEAN:
4473 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4474 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4475 case MONO_TYPE_CHAR:
4485 return NULL; /* stupid ARM FP swapped format */
/* The declared field must be large enough to hold the array contents. */
4495 if (size > mono_type_size (field->type, &dummy_align))
4498 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4499 field_index = read32 (ip + 2) & 0xffffff;
4500 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4501 data_ptr = mono_image_rva_map (method->klass->image, rva);
4502 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4503 /* for aot code we do the lookup on load */
4504 if (aot && data_ptr)
4505 return GUINT_TO_POINTER (rva);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the IL at IP, with a
 * message naming the method and either the offending instruction
 * (disassembled) or "method body is empty." when code_size is 0.
 * The temporary strings are freed after being formatted into the message.
 */
4512 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4514 char *method_fname = mono_method_full_name (method, TRUE);
4517 if (mono_method_get_header (method)->code_size == 0)
4518 method_code = g_strdup ("method body is empty.");
/* else: disassemble the single instruction at IP for the message. */
4520 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4521 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4522 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4523 g_free (method_fname);
4524 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG.  The exception_ptr
 * slot is registered as a GC root BEFORE the managed object is stored into
 * it, so the collector never sees an unrooted reference — keep this order.
 */
4528 set_exception_object (MonoCompile *cfg, MonoException *exception)
4530 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4531 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4532 cfg->exception_ptr = exception;
4536 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4540 if (cfg->generic_sharing_context)
4541 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4543 type = &klass->byval_arg;
4544 return MONO_TYPE_IS_REFERENCE (type);
4548 * mono_decompose_array_access_opts:
4550 * Decompose array access opcodes.
/*
 * Lowers the high-level array opcodes (array length load, bounds check,
 * newarr) into plain loads, arch-specific checks and icalls.
 * NOTE(review): several case labels, breaks and closing braces are not
 * visible in this extract.
 */
4553 mono_decompose_array_access_opts (MonoCompile *cfg)
4555 MonoBasicBlock *bb, *first_bb;
4558 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4559 * can be executed anytime. It should be run before decompose_long
4563 * Create a dummy bblock and emit code into it so we can use the normal
4564 * code generation macros.
4566 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4567 first_bb = cfg->cbb;
4569 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4571 MonoInst *prev = NULL;
4573 MonoInst *iargs [3];
/* Skip bblocks with nothing to decompose. */
4576 if (!bb->has_array_access)
4579 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4581 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4587 for (ins = bb->code; ins; ins = ins->next) {
4588 switch (ins->opcode) {
/* Array length: a plain i4 load of MonoArray.max_length. */
4590 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4591 G_STRUCT_OFFSET (MonoArray, max_length));
4592 MONO_ADD_INS (cfg->cbb, dest);
4594 case OP_BOUNDS_CHECK:
/* Delegate to the per-architecture bounds-check expansion. */
4595 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: with MONO_OPT_SHARED use the domain-aware icall ... */
4598 if (cfg->opt & MONO_OPT_SHARED) {
4599 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4600 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4601 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4602 iargs [2]->dreg = ins->sreg1;
4604 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4605 dest->dreg = ins->dreg;
/* ... otherwise bake in the array vtable and call the specific variant. */
4607 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4610 NEW_VTABLECONST (cfg, iargs [0], vtable);
4611 MONO_ADD_INS (cfg->cbb, iargs [0]);
4612 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4613 iargs [1]->dreg = ins->sreg1;
4615 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4616 dest->dreg = ins->dreg;
4623 g_assert (cfg->cbb == first_bb);
/* If the case above emitted anything, splice it in place of INS. */
4625 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4626 /* Replace the original instruction with the new code sequence */
4628 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4629 first_bb->code = first_bb->last_ins = NULL;
4630 first_bb->in_count = first_bb->out_count = 0;
4631 cfg->cbb = first_bb;
4638 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4648 #ifdef MONO_ARCH_SOFT_FLOAT
4651 * mono_handle_soft_float:
4653 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4654 * similar to long support on 32 bit platforms. 32 bit float values require special
4655 * handling when used as locals, arguments, and in calls.
4656 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * NOTE(review): many case labels, breaks and closing braces of this pass
 * are missing from this extract; the comments below describe only what the
 * visible lines establish.
 */
4659 mono_handle_soft_float (MonoCompile *cfg)
4661 MonoBasicBlock *bb, *first_bb;
4664 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4668 * Create a dummy bblock and emit code into it so we can use the normal
4669 * code generation macros.
4671 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4672 first_bb = cfg->cbb;
4674 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4676 MonoInst *prev = NULL;
4679 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4681 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4687 for (ins = bb->code; ins; ins = ins->next) {
4688 const char *spec = INS_INFO (ins->opcode);
4690 /* Most fp operations are handled automatically by opcode emulation */
4692 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as an i8 constant. */
4695 d.vald = *(double*)ins->inst_p0;
4696 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4701 /* We load the r8 value */
/* r4 constant: widen to double first, then emit its bits as i8. */
4702 d.vald = *(float*)ins->inst_p0;
4703 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long/int moves over the backing int vregs. */
4707 ins->opcode = OP_LMOVE;
4710 ins->opcode = OP_MOVE;
4711 ins->sreg1 = ins->sreg1 + 1;
4714 ins->opcode = OP_MOVE;
4715 ins->sreg1 = ins->sreg1 + 2;
/* fp return: set the long return from the vreg pair (+1 = low, +2 = high,
 * matching the long-decomposition layout — TODO confirm). */
4718 int reg = ins->sreg1;
4720 ins->opcode = OP_SETLRET;
4722 ins->sreg1 = reg + 1;
4723 ins->sreg2 = reg + 2;
/* r8 memory accesses become plain i8 accesses. */
4726 case OP_LOADR8_MEMBASE:
4727 ins->opcode = OP_LOADI8_MEMBASE;
4729 case OP_STORER8_MEMBASE_REG:
4730 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need an r8->r4 narrowing, done by the mono_fstore_r4 icall. */
4732 case OP_STORER4_MEMBASE_REG: {
4733 MonoInst *iargs [2];
4736 /* Arg 1 is the double value */
4737 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4738 iargs [0]->dreg = ins->sreg1;
4740 /* Arg 2 is the address to store to */
4741 addr_reg = mono_alloc_preg (cfg);
4742 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4743 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads are widened to r8 by the mono_fload_r4 icall. */
4747 case OP_LOADR4_MEMBASE: {
4748 MonoInst *iargs [1];
4752 addr_reg = mono_alloc_preg (cfg);
4753 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4754 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4755 conv->dreg = ins->dreg;
4760 case OP_FCALL_MEMBASE: {
4761 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4 come back in an int reg; clone the call as an
 * int-returning call, then widen the result to r8. */
4762 if (call->signature->ret->type == MONO_TYPE_R4) {
4763 MonoCallInst *call2;
4764 MonoInst *iargs [1];
4767 /* Convert the call into a call returning an int */
4768 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4769 memcpy (call2, call, sizeof (MonoCallInst));
4770 switch (ins->opcode) {
4772 call2->inst.opcode = OP_CALL;
4775 call2->inst.opcode = OP_CALL_REG;
4777 case OP_FCALL_MEMBASE:
4778 call2->inst.opcode = OP_CALL_MEMBASE;
4781 g_assert_not_reached ();
4783 call2->inst.dreg = mono_alloc_ireg (cfg);
4784 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4786 /* FIXME: Optimize this */
4788 /* Emit an r4->r8 conversion */
4789 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4790 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4791 conv->dreg = ins->dreg;
/* Calls returning r8 are simply retyped to long calls. */
4793 switch (ins->opcode) {
4795 ins->opcode = OP_LCALL;
4798 ins->opcode = OP_LCALL_REG;
4800 case OP_FCALL_MEMBASE:
4801 ins->opcode = OP_LCALL_MEMBASE;
4804 g_assert_not_reached ();
4810 MonoJitICallInfo *info;
4811 MonoInst *iargs [2];
4812 MonoInst *call, *cmp, *br;
4814 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is keyed off the FOLLOWING branch's opcode. */
4816 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4819 /* Create dummy MonoInst's for the arguments */
4820 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4821 iargs [0]->dreg = ins->sreg1;
4822 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4823 iargs [1]->dreg = ins->sreg2;
4825 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4827 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4828 cmp->sreg1 = call->dreg;
4830 MONO_ADD_INS (cfg->cbb, cmp);
/* Re-create the branch against the icall result, copying both targets. */
4832 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4833 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4834 br->inst_true_bb = ins->next->inst_true_bb;
4835 br->inst_false_bb = ins->next->inst_false_bb;
4836 MONO_ADD_INS (cfg->cbb, br);
4838 /* The call sequence might include fp ins */
4841 /* Skip fbcc or fccc */
/* The consumed branch/compare instruction is nulled out, not removed. */
4842 NULLIFY_INS (ins->next);
4850 MonoJitICallInfo *info;
4851 MonoInst *iargs [2];
4854 /* Convert fccc to icall+icompare+iceq */
4856 info = mono_find_jit_opcode_emulation (ins->opcode);
4859 /* Create dummy MonoInst's for the arguments */
4860 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4861 iargs [0]->dreg = ins->sreg1;
4862 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4863 iargs [1]->dreg = ins->sreg2;
4865 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Materialize the boolean: result == 1 -> dreg. */
4867 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4868 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4870 /* The call sequence might include fp ins */
/* Sanity: after lowering, no instruction may still use fp vregs. */
4875 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4876 mono_print_ins (ins);
4877 g_assert_not_reached ();
4882 g_assert (cfg->cbb == first_bb);
/* If the case above emitted anything, splice it in place of INS. */
4884 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4885 /* Replace the original instruction with the new code sequence */
4887 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4888 first_bb->code = first_bb->last_ins = NULL;
4889 first_bb->in_count = first_bb->out_count = 0;
4890 cfg->cbb = first_bb;
4897 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass created long opcodes; lower them now. */
4900 mono_decompose_long_opts (cfg);
4906 * mono_method_to_ir: translates IL into basic blocks containing trees
4909 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4910 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4911 guint inline_offset, gboolean is_virtual_call)
4913 MonoInst *ins, **sp, **stack_start;
4914 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4915 MonoMethod *cmethod, *method_definition;
4916 MonoInst **arg_array;
4917 MonoMethodHeader *header;
4919 guint32 token, ins_flag;
4921 MonoClass *constrained_call = NULL;
4922 unsigned char *ip, *end, *target, *err_pos;
4923 static double r8_0 = 0.0;
4924 MonoMethodSignature *sig;
4925 MonoGenericContext *generic_context = NULL;
4926 MonoGenericContainer *generic_container = NULL;
4927 MonoType **param_types;
4928 GList *bb_recheck = NULL, *tmp;
4929 int i, n, start_new_bblock, dreg;
4930 int num_calls = 0, inline_costs = 0;
4931 int breakpoint_id = 0;
4933 MonoBoolean security, pinvoke;
4934 MonoSecurityManager* secman = NULL;
4935 MonoDeclSecurityActions actions;
4936 GSList *class_inits = NULL;
4937 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4940 /* serialization and xdomain stuff may need access to private fields and methods */
4941 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4942 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4943 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4944 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4945 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4946 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4948 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4950 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4951 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4952 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4953 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4955 image = method->klass->image;
4956 header = mono_method_get_header (method);
4957 generic_container = mono_method_get_generic_container (method);
4958 sig = mono_method_signature (method);
4959 num_args = sig->hasthis + sig->param_count;
4960 ip = (unsigned char*)header->code;
4961 cfg->cil_start = ip;
4962 end = ip + header->code_size;
4963 mono_jit_stats.cil_code_size += header->code_size;
4965 method_definition = method;
4966 while (method_definition->is_inflated) {
4967 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4968 method_definition = imethod->declaring;
4971 /* SkipVerification is not allowed if core-clr is enabled */
4972 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4974 dont_verify_stloc = TRUE;
4977 if (!dont_verify && mini_method_verify (cfg, method_definition))
4978 goto exception_exit;
4980 if (sig->is_inflated)
4981 generic_context = mono_method_get_context (method);
4982 else if (generic_container)
4983 generic_context = &generic_container->context;
4984 cfg->generic_context = generic_context;
4986 if (!cfg->generic_sharing_context)
4987 g_assert (!sig->has_type_parameters);
4989 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4990 g_assert (method->is_inflated);
4991 g_assert (mono_method_get_context (method)->method_inst);
4993 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4994 g_assert (sig->generic_param_count);
4996 if (cfg->method == method) {
4997 cfg->real_offset = 0;
4999 cfg->real_offset = inline_offset;
5002 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5003 cfg->cil_offset_to_bb_len = header->code_size;
5005 cfg->current_method = method;
5007 if (cfg->verbose_level > 2)
5008 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5010 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5012 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5013 for (n = 0; n < sig->param_count; ++n)
5014 param_types [n + sig->hasthis] = sig->params [n];
5015 cfg->arg_types = param_types;
5017 dont_inline = g_list_prepend (dont_inline, method);
5018 if (cfg->method == method) {
5020 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5021 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5024 NEW_BBLOCK (cfg, start_bblock);
5025 cfg->bb_entry = start_bblock;
5026 start_bblock->cil_code = NULL;
5027 start_bblock->cil_length = 0;
5030 NEW_BBLOCK (cfg, end_bblock);
5031 cfg->bb_exit = end_bblock;
5032 end_bblock->cil_code = NULL;
5033 end_bblock->cil_length = 0;
5034 g_assert (cfg->num_bblocks == 2);
5036 arg_array = cfg->args;
5038 if (header->num_clauses) {
5039 cfg->spvars = g_hash_table_new (NULL, NULL);
5040 cfg->exvars = g_hash_table_new (NULL, NULL);
5042 /* handle exception clauses */
5043 for (i = 0; i < header->num_clauses; ++i) {
5044 MonoBasicBlock *try_bb;
5045 MonoExceptionClause *clause = &header->clauses [i];
5046 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5047 try_bb->real_offset = clause->try_offset;
5048 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5049 tblock->real_offset = clause->handler_offset;
5050 tblock->flags |= BB_EXCEPTION_HANDLER;
5052 link_bblock (cfg, try_bb, tblock);
5054 if (*(ip + clause->handler_offset) == CEE_POP)
5055 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5057 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5058 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5059 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5060 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5061 MONO_ADD_INS (tblock, ins);
5063 /* todo: is a fault block unsafe to optimize? */
5064 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5065 tblock->flags |= BB_EXCEPTION_UNSAFE;
5069 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5071 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5073 /* catch and filter blocks get the exception object on the stack */
5074 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5075 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5076 MonoInst *dummy_use;
5078 /* mostly like handle_stack_args (), but just sets the input args */
5079 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5080 tblock->in_scount = 1;
5081 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5082 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5085 * Add a dummy use for the exvar so its liveness info will be
5089 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5091 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5092 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5093 tblock->real_offset = clause->data.filter_offset;
5094 tblock->in_scount = 1;
5095 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5096 /* The filter block shares the exvar with the handler block */
5097 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5098 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5099 MONO_ADD_INS (tblock, ins);
5103 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5104 clause->data.catch_class &&
5105 cfg->generic_sharing_context &&
5106 mono_class_check_context_used (clause->data.catch_class)) {
5107 if (mono_method_get_context (method)->method_inst)
5108 GENERIC_SHARING_FAILURE (CEE_NOP);
5111 * In shared generic code with catch
5112 * clauses containing type variables
5113 * the exception handling code has to
5114 * be able to get to the rgctx.
5115 * Therefore we have to make sure that
5116 * the vtable/mrgctx argument (for
5117 * static or generic methods) or the
5118 * "this" argument (for non-static
5119 * methods) are live.
5121 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5122 mini_method_get_context (method)->method_inst) {
5123 mono_get_vtable_var (cfg);
5125 MonoInst *dummy_use;
5127 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5132 arg_array = alloca (sizeof (MonoInst *) * num_args);
5133 cfg->cbb = start_bblock;
5134 cfg->args = arg_array;
5135 mono_save_args (cfg, sig, inline_args);
5138 /* FIRST CODE BLOCK */
5139 NEW_BBLOCK (cfg, bblock);
5140 bblock->cil_code = ip;
5144 ADD_BBLOCK (cfg, bblock);
5146 if (cfg->method == method) {
5147 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5148 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5149 MONO_INST_NEW (cfg, ins, OP_BREAK);
5150 MONO_ADD_INS (bblock, ins);
5154 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5155 secman = mono_security_manager_get_methods ();
5157 security = (secman && mono_method_has_declsec (method));
5158 /* at this point having security doesn't mean we have any code to generate */
5159 if (security && (cfg->method == method)) {
5160 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5161 * And we do not want to enter the next section (with allocation) if we
5162 * have nothing to generate */
5163 security = mono_declsec_get_demands (method, &actions);
5166 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5167 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5169 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5170 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5171 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5173 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5174 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5178 mono_custom_attrs_free (custom);
5181 custom = mono_custom_attrs_from_class (wrapped->klass);
5182 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5186 mono_custom_attrs_free (custom);
5189 /* not a P/Invoke after all */
5194 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5195 /* we use a separate basic block for the initialization code */
5196 NEW_BBLOCK (cfg, init_localsbb);
5197 cfg->bb_init = init_localsbb;
5198 init_localsbb->real_offset = cfg->real_offset;
5199 start_bblock->next_bb = init_localsbb;
5200 init_localsbb->next_bb = bblock;
5201 link_bblock (cfg, start_bblock, init_localsbb);
5202 link_bblock (cfg, init_localsbb, bblock);
5204 cfg->cbb = init_localsbb;
5206 start_bblock->next_bb = bblock;
5207 link_bblock (cfg, start_bblock, bblock);
5210 /* at this point we know, if security is TRUE, that some code needs to be generated */
5211 if (security && (cfg->method == method)) {
5214 mono_jit_stats.cas_demand_generation++;
5216 if (actions.demand.blob) {
5217 /* Add code for SecurityAction.Demand */
5218 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5219 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5220 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5221 mono_emit_method_call (cfg, secman->demand, args, NULL);
5223 if (actions.noncasdemand.blob) {
5224 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5225 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5226 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5227 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5228 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5229 mono_emit_method_call (cfg, secman->demand, args, NULL);
5231 if (actions.demandchoice.blob) {
5232 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5233 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5234 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5235 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5236 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5240 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5242 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5245 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5246 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5247 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5248 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5249 if (!(method->klass && method->klass->image &&
5250 mono_security_core_clr_is_platform_image (method->klass->image))) {
5251 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5255 if (!method_is_safe (method))
5256 emit_throw_verification_exception (cfg, bblock, ip);
5259 if (header->code_size == 0)
5262 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5267 if (cfg->method == method)
5268 mono_debug_init_method (cfg, bblock, breakpoint_id);
5270 for (n = 0; n < header->num_locals; ++n) {
5271 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5276 /* add a check for this != NULL to inlined methods */
5277 if (is_virtual_call) {
5280 NEW_ARGLOAD (cfg, arg_ins, 0);
5281 MONO_ADD_INS (cfg->cbb, arg_ins);
5282 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5283 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5284 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5287 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5288 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5291 start_new_bblock = 0;
5295 if (cfg->method == method)
5296 cfg->real_offset = ip - header->code;
5298 cfg->real_offset = inline_offset;
5303 if (start_new_bblock) {
5304 bblock->cil_length = ip - bblock->cil_code;
5305 if (start_new_bblock == 2) {
5306 g_assert (ip == tblock->cil_code);
5308 GET_BBLOCK (cfg, tblock, ip);
5310 bblock->next_bb = tblock;
5313 start_new_bblock = 0;
5314 for (i = 0; i < bblock->in_scount; ++i) {
5315 if (cfg->verbose_level > 3)
5316 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5317 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5321 g_slist_free (class_inits);
5324 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5325 link_bblock (cfg, bblock, tblock);
5326 if (sp != stack_start) {
5327 handle_stack_args (cfg, stack_start, sp - stack_start);
5329 CHECK_UNVERIFIABLE (cfg);
5331 bblock->next_bb = tblock;
5334 for (i = 0; i < bblock->in_scount; ++i) {
5335 if (cfg->verbose_level > 3)
5336 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5337 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5340 g_slist_free (class_inits);
5345 bblock->real_offset = cfg->real_offset;
5347 if ((cfg->method == method) && cfg->coverage_info) {
5348 guint32 cil_offset = ip - header->code;
5349 cfg->coverage_info->data [cil_offset].cil_code = ip;
5351 /* TODO: Use an increment here */
5352 #if defined(__i386__)
5353 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5354 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5356 MONO_ADD_INS (cfg->cbb, ins);
5358 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5359 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5363 if (cfg->verbose_level > 3)
5364 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5369 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5371 MONO_ADD_INS (bblock, ins);
5377 CHECK_STACK_OVF (1);
5378 n = (*ip)-CEE_LDARG_0;
5380 EMIT_NEW_ARGLOAD (cfg, ins, n);
5388 CHECK_STACK_OVF (1);
5389 n = (*ip)-CEE_LDLOC_0;
5391 EMIT_NEW_LOCLOAD (cfg, ins, n);
5402 n = (*ip)-CEE_STLOC_0;
5405 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5408 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5409 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5410 /* Optimize reg-reg moves away */
5412 * Can't optimize other opcodes, since sp[0] might point to
5413 * the last ins of a decomposed opcode.
5415 sp [0]->dreg = (cfg)->locals [n]->dreg;
5417 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5425 CHECK_STACK_OVF (1);
5428 EMIT_NEW_ARGLOAD (cfg, ins, n);
5434 CHECK_STACK_OVF (1);
5437 NEW_ARGLOADA (cfg, ins, n);
5438 MONO_ADD_INS (cfg->cbb, ins);
5448 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5450 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5455 CHECK_STACK_OVF (1);
5458 EMIT_NEW_LOCLOAD (cfg, ins, n);
5464 CHECK_STACK_OVF (1);
5465 CHECK_LOCAL (ip [1]);
5468 * ldloca inhibits many optimizations so try to get rid of it in common
5471 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5472 gboolean skip = FALSE;
5474 /* From the INITOBJ case */
5475 token = read32 (ip + 4);
5476 klass = mini_get_class (method, token, generic_context);
5477 CHECK_TYPELOAD (klass);
5478 if (generic_class_is_reference_type (cfg, klass)) {
5479 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5480 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5481 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5482 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5483 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5495 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5503 CHECK_LOCAL (ip [1]);
5504 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5506 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5511 CHECK_STACK_OVF (1);
5512 EMIT_NEW_PCONST (cfg, ins, NULL);
5513 ins->type = STACK_OBJ;
5518 CHECK_STACK_OVF (1);
5519 EMIT_NEW_ICONST (cfg, ins, -1);
5532 CHECK_STACK_OVF (1);
5533 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5539 CHECK_STACK_OVF (1);
5541 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5547 CHECK_STACK_OVF (1);
5548 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5554 CHECK_STACK_OVF (1);
5555 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5556 ins->type = STACK_I8;
5557 ins->dreg = alloc_dreg (cfg, STACK_I8);
5559 ins->inst_l = (gint64)read64 (ip);
5560 MONO_ADD_INS (bblock, ins);
5566 /* FIXME: we should really allocate this only late in the compilation process */
5567 mono_domain_lock (cfg->domain);
5568 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5569 mono_domain_unlock (cfg->domain);
5571 CHECK_STACK_OVF (1);
5572 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5573 ins->type = STACK_R8;
5574 ins->dreg = alloc_dreg (cfg, STACK_R8);
5578 MONO_ADD_INS (bblock, ins);
5586 /* FIXME: we should really allocate this only late in the compilation process */
5587 mono_domain_lock (cfg->domain);
5588 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5589 mono_domain_unlock (cfg->domain);
5591 CHECK_STACK_OVF (1);
5592 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5593 ins->type = STACK_R8;
5594 ins->dreg = alloc_dreg (cfg, STACK_R8);
5598 MONO_ADD_INS (bblock, ins);
5605 MonoInst *temp, *store;
5607 CHECK_STACK_OVF (1);
5611 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5612 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5614 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5617 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5630 if (sp [0]->type == STACK_R8)
5631 /* we need to pop the value from the x86 FP stack */
5632 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5639 if (stack_start != sp)
5641 token = read32 (ip + 1);
5642 /* FIXME: check the signature matches */
5643 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5648 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5649 GENERIC_SHARING_FAILURE (CEE_JMP);
5651 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5652 if (check_linkdemand (cfg, method, cmethod))
5654 CHECK_CFG_EXCEPTION;
5659 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5662 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5664 /* Handle tail calls similarly to calls */
5665 n = fsig->param_count + fsig->hasthis;
5667 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5668 call->method = cmethod;
5669 call->tail_call = TRUE;
5670 call->signature = mono_method_signature (cmethod);
5671 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5672 call->inst.inst_p0 = cmethod;
5673 for (i = 0; i < n; ++i)
5674 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5676 mono_arch_emit_call (cfg, call);
5677 MONO_ADD_INS (bblock, (MonoInst*)call);
5680 for (i = 0; i < num_args; ++i)
5681 /* Prevent arguments from being optimized away */
5682 arg_array [i]->flags |= MONO_INST_VOLATILE;
5684 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5685 ins = (MonoInst*)call;
5686 ins->inst_p0 = cmethod;
5687 MONO_ADD_INS (bblock, ins);
5691 start_new_bblock = 1;
5696 case CEE_CALLVIRT: {
5697 MonoInst *addr = NULL;
5698 MonoMethodSignature *fsig = NULL;
5700 int virtual = *ip == CEE_CALLVIRT;
5701 int calli = *ip == CEE_CALLI;
5702 gboolean pass_imt_from_rgctx = FALSE;
5703 MonoInst *imt_arg = NULL;
5704 gboolean pass_vtable = FALSE;
5705 gboolean pass_mrgctx = FALSE;
5706 MonoInst *vtable_arg = NULL;
5707 gboolean check_this = FALSE;
5710 token = read32 (ip + 1);
5717 if (method->wrapper_type != MONO_WRAPPER_NONE)
5718 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5720 fsig = mono_metadata_parse_signature (image, token);
5722 n = fsig->param_count + fsig->hasthis;
5724 MonoMethod *cil_method;
5726 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5727 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5728 cil_method = cmethod;
5729 } else if (constrained_call) {
5730 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5732 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5733 cil_method = cmethod;
5738 if (!dont_verify && !cfg->skip_visibility) {
5739 MonoMethod *target_method = cil_method;
5740 if (method->is_inflated) {
5741 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5743 if (!mono_method_can_access_method (method_definition, target_method) &&
5744 !mono_method_can_access_method (method, cil_method))
5745 METHOD_ACCESS_FAILURE;
5748 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5749 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5751 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5752 /* MS.NET seems to silently convert this to a callvirt */
5755 if (!cmethod->klass->inited)
5756 if (!mono_class_init (cmethod->klass))
5759 if (mono_method_signature (cmethod)->pinvoke) {
5760 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
5761 fsig = mono_method_signature (wrapper);
5762 } else if (constrained_call) {
5763 fsig = mono_method_signature (cmethod);
5765 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5768 mono_save_token_info (cfg, image, token, cmethod);
5770 n = fsig->param_count + fsig->hasthis;
5772 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5773 if (check_linkdemand (cfg, method, cmethod))
5775 CHECK_CFG_EXCEPTION;
5778 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5779 mini_class_is_system_array (cmethod->klass)) {
5780 array_rank = cmethod->klass->rank;
5783 if (cmethod->string_ctor)
5784 g_assert_not_reached ();
5787 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5790 if (!cfg->generic_sharing_context && cmethod)
5791 g_assert (!mono_method_check_context_used (cmethod));
5795 //g_assert (!virtual || fsig->hasthis);
5799 if (constrained_call) {
5801 * We have the `constrained.' prefix opcode.
5803 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5807 * The type parameter is instantiated as a valuetype,
5808 * but that type doesn't override the method we're
5809 * calling, so we need to box `this'.
5811 dreg = alloc_dreg (cfg, STACK_VTYPE);
5812 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5813 ins->klass = constrained_call;
5814 sp [0] = handle_box (cfg, ins, constrained_call);
5815 } else if (!constrained_call->valuetype) {
5816 int dreg = alloc_preg (cfg);
5819 * The type parameter is instantiated as a reference
5820 * type. We have a managed pointer on the stack, so
5821 * we need to dereference it here.
5823 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5824 ins->type = STACK_OBJ;
5826 } else if (cmethod->klass->valuetype)
5828 constrained_call = NULL;
5831 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5835 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5836 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5837 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5838 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5839 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5842 * Pass vtable iff target method might
5843 * be shared, which means that sharing
5844 * is enabled for its class and its
5845 * context is sharable (and it's not a
5848 if (sharing_enabled && context_sharable &&
5849 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5853 if (cmethod && mini_method_get_context (cmethod) &&
5854 mini_method_get_context (cmethod)->method_inst) {
5855 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5856 MonoGenericContext *context = mini_method_get_context (cmethod);
5857 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5859 g_assert (!pass_vtable);
5861 if (sharing_enabled && context_sharable)
5865 if (cfg->generic_sharing_context && cmethod) {
5866 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5868 context_used = mono_method_check_context_used (cmethod);
5870 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5871 /* Generic method interface
5872 calls are resolved via a
5873 helper function and don't
5875 if (!cmethod_context || !cmethod_context->method_inst)
5876 pass_imt_from_rgctx = TRUE;
5880 * If a shared method calls another
5881 * shared method then the caller must
5882 * have a generic sharing context
5883 * because the magic trampoline
5884 * requires it. FIXME: We shouldn't
5885 * have to force the vtable/mrgctx
5886 * variable here. Instead there
5887 * should be a flag in the cfg to
5888 * request a generic sharing context.
5890 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5891 mono_get_vtable_var (cfg);
5898 EMIT_GET_RGCTX (rgctx, context_used);
5899 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5901 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5903 CHECK_TYPELOAD (cmethod->klass);
5904 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5909 g_assert (!vtable_arg);
5914 EMIT_GET_RGCTX (rgctx, context_used);
5915 vtable_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5917 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5920 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5921 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5928 if (pass_imt_from_rgctx) {
5931 g_assert (!pass_vtable);
5934 EMIT_GET_RGCTX (rgctx, context_used);
5935 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5936 MONO_RGCTX_INFO_METHOD);
5942 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5943 check->sreg1 = sp [0]->dreg;
5944 MONO_ADD_INS (cfg->cbb, check);
5947 /* Calling virtual generic methods */
5948 if (cmethod && virtual &&
5949 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5950 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5951 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5952 mono_method_signature (cmethod)->generic_param_count) {
5953 MonoInst *this_temp, *this_arg_temp, *store;
5954 MonoInst *iargs [4];
5956 g_assert (mono_method_signature (cmethod)->is_inflated);
5958 /* Prevent inlining of methods that contain indirect calls */
5961 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5962 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5963 MONO_ADD_INS (bblock, store);
5965 /* FIXME: This should be a managed pointer */
5966 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5968 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5972 EMIT_GET_RGCTX (rgctx, context_used);
5973 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5974 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5975 addr = mono_emit_jit_icall (cfg,
5976 mono_helper_compile_generic_method, iargs);
5978 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5979 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5980 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5983 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5985 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5986 if (!MONO_TYPE_IS_VOID (fsig->ret))
5995 /* FIXME: runtime generic context pointer for jumps? */
5996 /* FIXME: handle this for generic sharing eventually */
5997 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5998 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6001 /* FIXME: runtime generic context pointer for jumps? */
6002 GENERIC_SHARING_FAILURE (*ip);
6004 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6007 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6008 call->tail_call = TRUE;
6009 call->method = cmethod;
6010 call->signature = mono_method_signature (cmethod);
6013 /* Handle tail calls similarly to calls */
6014 call->inst.opcode = OP_TAILCALL;
6016 mono_arch_emit_call (cfg, call);
6019 * We implement tail calls by storing the actual arguments into the
6020 * argument variables, then emitting a CEE_JMP.
6022 for (i = 0; i < n; ++i) {
6023 /* Prevent argument from being register allocated */
6024 arg_array [i]->flags |= MONO_INST_VOLATILE;
6025 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6029 ins = (MonoInst*)call;
6030 ins->inst_p0 = cmethod;
6031 ins->inst_p1 = arg_array [0];
6032 MONO_ADD_INS (bblock, ins);
6033 link_bblock (cfg, bblock, end_bblock);
6034 start_new_bblock = 1;
6035 /* skip CEE_RET as well */
6041 /* Conversion to a JIT intrinsic */
6042 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6043 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6044 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6055 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6056 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6057 mono_method_check_inlining (cfg, cmethod) &&
6058 !g_list_find (dont_inline, cmethod)) {
6060 gboolean allways = FALSE;
6062 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6063 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6064 /* Prevent inlining of methods that call wrappers */
6066 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6070 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6072 cfg->real_offset += 5;
6075 if (!MONO_TYPE_IS_VOID (fsig->ret))
6076 /* *sp is already set by inline_method */
6079 inline_costs += costs;
6085 inline_costs += 10 * num_calls++;
6087 /* Tail recursion elimination */
6088 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6089 gboolean has_vtargs = FALSE;
6092 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6095 /* keep it simple */
6096 for (i = fsig->param_count - 1; i >= 0; i--) {
6097 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6102 for (i = 0; i < n; ++i)
6103 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6104 MONO_INST_NEW (cfg, ins, OP_BR);
6105 MONO_ADD_INS (bblock, ins);
6106 tblock = start_bblock->out_bb [0];
6107 link_bblock (cfg, bblock, tblock);
6108 ins->inst_target_bb = tblock;
6109 start_new_bblock = 1;
6111 /* skip the CEE_RET, too */
6112 if (ip_in_bb (cfg, bblock, ip + 5))
6122 /* Generic sharing */
6123 /* FIXME: only do this for generic methods if
6124 they are not shared! */
6126 (cmethod->klass->valuetype ||
6127 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6128 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6129 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6130 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6131 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6132 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6137 g_assert (cfg->generic_sharing_context && cmethod);
6141 * We are compiling a call to a
6142 * generic method from shared code,
6143 * which means that we have to look up
6144 * the method in the rgctx and do an
6148 EMIT_GET_RGCTX (rgctx, context_used);
6149 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6152 /* Indirect calls */
6154 g_assert (!imt_arg);
6156 if (*ip == CEE_CALL)
6157 g_assert (context_used);
6158 else if (*ip == CEE_CALLI)
6159 g_assert (!vtable_arg);
6161 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6162 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6164 /* Prevent inlining of methods with indirect calls */
6168 #ifdef MONO_ARCH_RGCTX_REG
6170 int rgctx_reg = mono_alloc_preg (cfg);
6172 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6173 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6174 call = (MonoCallInst*)ins;
6175 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6176 cfg->uses_rgctx_reg = TRUE;
6181 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6183 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6184 if (fsig->pinvoke && !fsig->ret->byref) {
6188 * Native code might return non register sized integers
6189 * without initializing the upper bits.
6191 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6192 case OP_LOADI1_MEMBASE:
6193 widen_op = OP_ICONV_TO_I1;
6195 case OP_LOADU1_MEMBASE:
6196 widen_op = OP_ICONV_TO_U1;
6198 case OP_LOADI2_MEMBASE:
6199 widen_op = OP_ICONV_TO_I2;
6201 case OP_LOADU2_MEMBASE:
6202 widen_op = OP_ICONV_TO_U2;
6208 if (widen_op != -1) {
6209 int dreg = alloc_preg (cfg);
6212 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6213 widen->type = ins->type;
6230 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6231 if (sp [fsig->param_count]->type == STACK_OBJ) {
6232 MonoInst *iargs [2];
6235 iargs [1] = sp [fsig->param_count];
6237 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6240 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6241 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6242 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6243 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6245 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6248 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6249 if (!cmethod->klass->element_class->valuetype && !readonly)
6250 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6253 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6256 g_assert_not_reached ();
6264 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6266 if (!MONO_TYPE_IS_VOID (fsig->ret))
6277 #ifdef MONO_ARCH_RGCTX_REG
6279 int rgctx_reg = mono_alloc_preg (cfg);
6281 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6282 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6283 call = (MonoCallInst*)ins;
6284 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6285 cfg->uses_rgctx_reg = TRUE;
6289 } else if (imt_arg) {
6290 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6292 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6295 if (!MONO_TYPE_IS_VOID (fsig->ret))
6303 if (cfg->method != method) {
6304 /* return from inlined method */
6306 * If in_count == 0, that means the ret is unreachable due to
6307 * being preceded by a throw. In that case, inline_method () will
6308 * handle setting the return value
6309 * (test case: test_0_inline_throw ()).
6311 if (return_var && cfg->cbb->in_count) {
6315 //g_assert (returnvar != -1);
6316 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6317 cfg->ret_var_set = TRUE;
6321 MonoType *ret_type = mono_method_signature (method)->ret;
6323 g_assert (!return_var);
6326 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6329 if (!cfg->vret_addr) {
6332 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6334 EMIT_NEW_RETLOADA (cfg, ret_addr);
6336 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6337 ins->klass = mono_class_from_mono_type (ret_type);
6340 #ifdef MONO_ARCH_SOFT_FLOAT
6341 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6342 MonoInst *iargs [1];
6346 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6347 mono_arch_emit_setret (cfg, method, conv);
6349 mono_arch_emit_setret (cfg, method, *sp);
6352 mono_arch_emit_setret (cfg, method, *sp);
6357 if (sp != stack_start)
6359 MONO_INST_NEW (cfg, ins, OP_BR);
6361 ins->inst_target_bb = end_bblock;
6362 MONO_ADD_INS (bblock, ins);
6363 link_bblock (cfg, bblock, end_bblock);
6364 start_new_bblock = 1;
6368 MONO_INST_NEW (cfg, ins, OP_BR);
6370 target = ip + 1 + (signed char)(*ip);
6372 GET_BBLOCK (cfg, tblock, target);
6373 link_bblock (cfg, bblock, tblock);
6374 CHECK_BBLOCK (target, ip, tblock);
6375 ins->inst_target_bb = tblock;
6376 if (sp != stack_start) {
6377 handle_stack_args (cfg, stack_start, sp - stack_start);
6379 CHECK_UNVERIFIABLE (cfg);
6381 MONO_ADD_INS (bblock, ins);
6382 start_new_bblock = 1;
6383 inline_costs += BRANCH_COST;
6397 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6399 target = ip + 1 + *(signed char*)ip;
6405 inline_costs += BRANCH_COST;
6409 MONO_INST_NEW (cfg, ins, OP_BR);
6412 target = ip + 4 + (gint32)read32(ip);
6414 GET_BBLOCK (cfg, tblock, target);
6415 link_bblock (cfg, bblock, tblock);
6416 CHECK_BBLOCK (target, ip, tblock);
6417 ins->inst_target_bb = tblock;
6418 if (sp != stack_start) {
6419 handle_stack_args (cfg, stack_start, sp - stack_start);
6421 CHECK_UNVERIFIABLE (cfg);
6424 MONO_ADD_INS (bblock, ins);
6426 start_new_bblock = 1;
6427 inline_costs += BRANCH_COST;
6434 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6435 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6436 guint32 opsize = is_short ? 1 : 4;
6438 CHECK_OPSIZE (opsize);
6440 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6443 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6448 GET_BBLOCK (cfg, tblock, target);
6449 link_bblock (cfg, bblock, tblock);
6450 CHECK_BBLOCK (target, ip, tblock);
6451 GET_BBLOCK (cfg, tblock, ip);
6452 link_bblock (cfg, bblock, tblock);
6454 if (sp != stack_start) {
6455 handle_stack_args (cfg, stack_start, sp - stack_start);
6456 CHECK_UNVERIFIABLE (cfg);
6459 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6460 cmp->sreg1 = sp [0]->dreg;
6461 type_from_op (cmp, sp [0], NULL);
6464 #if SIZEOF_VOID_P == 4
6465 if (cmp->opcode == OP_LCOMPARE_IMM) {
6466 /* Convert it to OP_LCOMPARE */
6467 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6468 ins->type = STACK_I8;
6469 ins->dreg = alloc_dreg (cfg, STACK_I8);
6471 MONO_ADD_INS (bblock, ins);
6472 cmp->opcode = OP_LCOMPARE;
6473 cmp->sreg2 = ins->dreg;
6476 MONO_ADD_INS (bblock, cmp);
6478 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6479 type_from_op (ins, sp [0], NULL);
6480 MONO_ADD_INS (bblock, ins);
6481 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6482 GET_BBLOCK (cfg, tblock, target);
6483 ins->inst_true_bb = tblock;
6484 GET_BBLOCK (cfg, tblock, ip);
6485 ins->inst_false_bb = tblock;
6486 start_new_bblock = 2;
6489 inline_costs += BRANCH_COST;
6504 MONO_INST_NEW (cfg, ins, *ip);
6506 target = ip + 4 + (gint32)read32(ip);
6512 inline_costs += BRANCH_COST;
6516 MonoBasicBlock **targets;
6517 MonoBasicBlock *default_bblock;
6518 MonoJumpInfoBBTable *table;
6520 int offset_reg = alloc_preg (cfg);
6521 int target_reg = alloc_preg (cfg);
6522 int table_reg = alloc_preg (cfg);
6523 int sum_reg = alloc_preg (cfg);
6528 n = read32 (ip + 1);
6531 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6535 CHECK_OPSIZE (n * sizeof (guint32));
6536 target = ip + n * sizeof (guint32);
6538 GET_BBLOCK (cfg, default_bblock, target);
6540 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6541 for (i = 0; i < n; ++i) {
6542 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6543 targets [i] = tblock;
6547 if (sp != stack_start) {
6549 * Link the current bb with the targets as well, so handle_stack_args
6550 * will set their in_stack correctly.
6552 link_bblock (cfg, bblock, default_bblock);
6553 for (i = 0; i < n; ++i)
6554 link_bblock (cfg, bblock, targets [i]);
6556 handle_stack_args (cfg, stack_start, sp - stack_start);
6558 CHECK_UNVERIFIABLE (cfg);
6561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6565 for (i = 0; i < n; ++i)
6566 link_bblock (cfg, bblock, targets [i]);
6568 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6569 table->table = targets;
6570 table->table_size = n;
6573 /* ARM implements SWITCH statements differently */
6574 /* FIXME: Make it use the generic implementation */
6575 /* the backend code will deal with aot vs normal case */
6576 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6577 ins->sreg1 = src1->dreg;
6578 ins->inst_p0 = table;
6579 ins->inst_many_bb = targets;
6580 ins->klass = GUINT_TO_POINTER (n);
6581 MONO_ADD_INS (cfg->cbb, ins);
6583 if (sizeof (gpointer) == 8)
6584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6588 #if SIZEOF_VOID_P == 8
6589 /* The upper word might not be zero, and we add it to a 64 bit address later */
6590 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6593 if (cfg->compile_aot) {
6594 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6596 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6597 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6598 ins->inst_p0 = table;
6599 ins->dreg = table_reg;
6600 MONO_ADD_INS (cfg->cbb, ins);
6603 /* FIXME: Use load_memindex */
6604 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6606 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6608 start_new_bblock = 1;
6609 inline_costs += (BRANCH_COST * 2);
6629 dreg = alloc_freg (cfg);
6632 dreg = alloc_lreg (cfg);
6635 dreg = alloc_preg (cfg);
6638 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6639 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6640 ins->flags |= ins_flag;
6642 MONO_ADD_INS (bblock, ins);
6657 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6658 ins->flags |= ins_flag;
6660 MONO_ADD_INS (bblock, ins);
6668 MONO_INST_NEW (cfg, ins, (*ip));
6670 ins->sreg1 = sp [0]->dreg;
6671 ins->sreg2 = sp [1]->dreg;
6672 type_from_op (ins, sp [0], sp [1]);
6674 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6676 /* Use the immediate opcodes if possible */
6677 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6678 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6679 if (imm_opcode != -1) {
6680 ins->opcode = imm_opcode;
6681 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6684 sp [1]->opcode = OP_NOP;
6688 MONO_ADD_INS ((cfg)->cbb, (ins));
6691 mono_decompose_opcode (cfg, ins);
6708 MONO_INST_NEW (cfg, ins, (*ip));
6710 ins->sreg1 = sp [0]->dreg;
6711 ins->sreg2 = sp [1]->dreg;
6712 type_from_op (ins, sp [0], sp [1]);
6714 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6715 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6717 /* FIXME: Pass opcode to is_inst_imm */
6719 /* Use the immediate opcodes if possible */
6720 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6723 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6724 if (imm_opcode != -1) {
6725 ins->opcode = imm_opcode;
6726 if (sp [1]->opcode == OP_I8CONST) {
6727 #if SIZEOF_VOID_P == 8
6728 ins->inst_imm = sp [1]->inst_l;
6730 ins->inst_ls_word = sp [1]->inst_ls_word;
6731 ins->inst_ms_word = sp [1]->inst_ms_word;
6735 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6738 sp [1]->opcode = OP_NOP;
6741 MONO_ADD_INS ((cfg)->cbb, (ins));
6744 mono_decompose_opcode (cfg, ins);
6757 case CEE_CONV_OVF_I8:
6758 case CEE_CONV_OVF_U8:
6762 /* Special case this earlier so we have long constants in the IR */
6763 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6764 int data = sp [-1]->inst_c0;
6765 sp [-1]->opcode = OP_I8CONST;
6766 sp [-1]->type = STACK_I8;
6767 #if SIZEOF_VOID_P == 8
6768 if ((*ip) == CEE_CONV_U8)
6769 sp [-1]->inst_c0 = (guint32)data;
6771 sp [-1]->inst_c0 = data;
6773 sp [-1]->inst_ls_word = data;
6774 if ((*ip) == CEE_CONV_U8)
6775 sp [-1]->inst_ms_word = 0;
6777 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6779 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6786 case CEE_CONV_OVF_I4:
6787 case CEE_CONV_OVF_I1:
6788 case CEE_CONV_OVF_I2:
6789 case CEE_CONV_OVF_I:
6790 case CEE_CONV_OVF_U:
6793 if (sp [-1]->type == STACK_R8) {
6794 ADD_UNOP (CEE_CONV_OVF_I8);
6801 case CEE_CONV_OVF_U1:
6802 case CEE_CONV_OVF_U2:
6803 case CEE_CONV_OVF_U4:
6806 if (sp [-1]->type == STACK_R8) {
6807 ADD_UNOP (CEE_CONV_OVF_U8);
6814 case CEE_CONV_OVF_I1_UN:
6815 case CEE_CONV_OVF_I2_UN:
6816 case CEE_CONV_OVF_I4_UN:
6817 case CEE_CONV_OVF_I8_UN:
6818 case CEE_CONV_OVF_U1_UN:
6819 case CEE_CONV_OVF_U2_UN:
6820 case CEE_CONV_OVF_U4_UN:
6821 case CEE_CONV_OVF_U8_UN:
6822 case CEE_CONV_OVF_I_UN:
6823 case CEE_CONV_OVF_U_UN:
6833 case CEE_ADD_OVF_UN:
6835 case CEE_MUL_OVF_UN:
6837 case CEE_SUB_OVF_UN:
6845 token = read32 (ip + 1);
6846 klass = mini_get_class (method, token, generic_context);
6847 CHECK_TYPELOAD (klass);
6849 if (generic_class_is_reference_type (cfg, klass)) {
6850 MonoInst *store, *load;
6851 int dreg = alloc_preg (cfg);
6853 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6854 load->flags |= ins_flag;
6855 MONO_ADD_INS (cfg->cbb, load);
6857 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6858 store->flags |= ins_flag;
6859 MONO_ADD_INS (cfg->cbb, store);
6861 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6873 token = read32 (ip + 1);
6874 klass = mini_get_class (method, token, generic_context);
6875 CHECK_TYPELOAD (klass);
6877 /* Optimize the common ldobj+stloc combination */
6887 loc_index = ip [5] - CEE_STLOC_0;
6894 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6895 CHECK_LOCAL (loc_index);
6897 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6898 ins->dreg = cfg->locals [loc_index]->dreg;
6904 /* Optimize the ldobj+stobj combination */
6905 /* The reference case ends up being a load+store anyway */
6906 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6911 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6918 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6927 CHECK_STACK_OVF (1);
6929 n = read32 (ip + 1);
6931 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6932 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6933 ins->type = STACK_OBJ;
6936 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6937 MonoInst *iargs [1];
6939 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6940 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6942 if (cfg->opt & MONO_OPT_SHARED) {
6943 MonoInst *iargs [3];
6945 if (cfg->compile_aot) {
6946 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6948 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6949 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6950 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6951 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6952 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6954 if (bblock->out_of_line) {
6955 MonoInst *iargs [2];
6957 if (cfg->method->klass->image == mono_defaults.corlib) {
6959 * Avoid relocations in AOT and save some space by using a
6960 * version of helper_ldstr specialized to mscorlib.
6962 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6963 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6965 /* Avoid creating the string object */
6966 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6967 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6968 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6972 if (cfg->compile_aot) {
6973 NEW_LDSTRCONST (cfg, ins, image, n);
6975 MONO_ADD_INS (bblock, ins);
6978 NEW_PCONST (cfg, ins, NULL);
6979 ins->type = STACK_OBJ;
6980 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6982 MONO_ADD_INS (bblock, ins);
6991 MonoInst *iargs [2];
6992 MonoMethodSignature *fsig;
6997 token = read32 (ip + 1);
6998 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7001 fsig = mono_method_get_signature (cmethod, image, token);
7003 mono_save_token_info (cfg, image, token, cmethod);
7005 if (!mono_class_init (cmethod->klass))
7008 if (cfg->generic_sharing_context)
7009 context_used = mono_method_check_context_used (cmethod);
7011 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7012 if (check_linkdemand (cfg, method, cmethod))
7014 CHECK_CFG_EXCEPTION;
7015 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7016 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7019 n = fsig->param_count;
7023 * Generate smaller code for the common newobj <exception> instruction in
7024 * argument checking code.
7026 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7027 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7028 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7029 MonoInst *iargs [3];
7033 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7036 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7040 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7045 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7048 g_assert_not_reached ();
7056 /* move the args to allow room for 'this' in the first position */
7062 /* check_call_signature () requires sp[0] to be set */
7063 this_ins.type = STACK_OBJ;
7065 if (check_call_signature (cfg, fsig, sp))
7070 if (mini_class_is_system_array (cmethod->klass)) {
7071 g_assert (!context_used);
7072 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7074 /* Avoid varargs in the common case */
7075 if (fsig->param_count == 1)
7076 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7077 else if (fsig->param_count == 2)
7078 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7080 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7081 } else if (cmethod->string_ctor) {
7082 g_assert (!context_used);
7083 /* we simply pass a null pointer */
7084 EMIT_NEW_PCONST (cfg, *sp, NULL);
7085 /* now call the string ctor */
7086 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7088 MonoInst* callvirt_this_arg = NULL;
7090 if (cmethod->klass->valuetype) {
7091 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7092 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7093 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7098 * The code generated by mini_emit_virtual_call () expects
7099 * iargs [0] to be a boxed instance, but luckily the vcall
7100 * will be transformed into a normal call there.
7102 } else if (context_used) {
7103 MonoInst *rgctx, *data;
7106 EMIT_GET_RGCTX (rgctx, context_used);
7107 if (cfg->opt & MONO_OPT_SHARED)
7108 rgctx_info = MONO_RGCTX_INFO_KLASS;
7110 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7111 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7113 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7116 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7118 CHECK_TYPELOAD (cmethod->klass);
7121 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7122 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7123 * As a workaround, we call class cctors before allocating objects.
7125 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7126 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7127 if (cfg->verbose_level > 2)
7128 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7129 class_inits = g_slist_prepend (class_inits, vtable);
7132 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7137 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7139 /* Now call the actual ctor */
7140 /* Avoid virtual calls to ctors if possible */
7141 if (cmethod->klass->marshalbyref)
7142 callvirt_this_arg = sp [0];
7144 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7145 mono_method_check_inlining (cfg, cmethod) &&
7146 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7147 !g_list_find (dont_inline, cmethod)) {
7150 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7151 cfg->real_offset += 5;
7154 inline_costs += costs - 5;
7157 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7159 } else if (context_used &&
7160 (cmethod->klass->valuetype ||
7161 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7162 MonoInst *rgctx, *cmethod_addr;
7164 g_assert (!callvirt_this_arg);
7166 EMIT_GET_RGCTX (rgctx, context_used);
7167 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7168 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7170 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7173 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7177 if (alloc == NULL) {
7179 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7180 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7194 token = read32 (ip + 1);
7195 klass = mini_get_class (method, token, generic_context);
7196 CHECK_TYPELOAD (klass);
7197 if (sp [0]->type != STACK_OBJ)
7200 if (cfg->generic_sharing_context)
7201 context_used = mono_class_check_context_used (klass);
7204 MonoInst *rgctx, *args [2];
7206 g_assert (!method->klass->valuetype);
7212 EMIT_GET_RGCTX (rgctx, context_used);
7213 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7214 MONO_RGCTX_INFO_KLASS);
7216 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7220 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7221 MonoMethod *mono_castclass;
7222 MonoInst *iargs [1];
7225 mono_castclass = mono_marshal_get_castclass (klass);
7228 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7229 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7230 g_assert (costs > 0);
7233 cfg->real_offset += 5;
7238 inline_costs += costs;
7241 ins = handle_castclass (cfg, klass, *sp);
7251 token = read32 (ip + 1);
7252 klass = mini_get_class (method, token, generic_context);
7253 CHECK_TYPELOAD (klass);
7254 if (sp [0]->type != STACK_OBJ)
7257 if (cfg->generic_sharing_context)
7258 context_used = mono_class_check_context_used (klass);
7261 MonoInst *rgctx, *args [2];
7267 EMIT_GET_RGCTX (rgctx, context_used);
7268 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7270 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7274 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7275 MonoMethod *mono_isinst;
7276 MonoInst *iargs [1];
7279 mono_isinst = mono_marshal_get_isinst (klass);
7282 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7283 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7284 g_assert (costs > 0);
7287 cfg->real_offset += 5;
7292 inline_costs += costs;
7295 ins = handle_isinst (cfg, klass, *sp);
7302 case CEE_UNBOX_ANY: {
7303 MonoInst *rgctx = NULL;
7308 token = read32 (ip + 1);
7309 klass = mini_get_class (method, token, generic_context);
7310 CHECK_TYPELOAD (klass);
7312 mono_save_token_info (cfg, image, token, klass);
7314 if (cfg->generic_sharing_context)
7315 context_used = mono_class_check_context_used (klass);
7317 if (generic_class_is_reference_type (cfg, klass)) {
7320 MonoInst *iargs [2];
7322 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
7327 EMIT_GET_RGCTX (rgctx, context_used);
7328 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7329 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7333 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7334 MonoMethod *mono_castclass;
7335 MonoInst *iargs [1];
7338 mono_castclass = mono_marshal_get_castclass (klass);
7341 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7342 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7344 g_assert (costs > 0);
7347 cfg->real_offset += 5;
7351 inline_costs += costs;
7353 ins = handle_castclass (cfg, klass, *sp);
7362 EMIT_GET_RGCTX (rgctx, context_used);
7364 if (mono_class_is_nullable (klass)) {
7365 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7372 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7378 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7391 token = read32 (ip + 1);
7392 klass = mini_get_class (method, token, generic_context);
7393 CHECK_TYPELOAD (klass);
7395 mono_save_token_info (cfg, image, token, klass);
7397 if (cfg->generic_sharing_context)
7398 context_used = mono_class_check_context_used (klass);
7400 if (generic_class_is_reference_type (cfg, klass)) {
7406 if (klass == mono_defaults.void_class)
7408 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7410 /* frequent check in generic code: box (struct), brtrue */
7411 if (!mono_class_is_nullable (klass) &&
7412 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7413 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7415 MONO_INST_NEW (cfg, ins, OP_BR);
7416 if (*ip == CEE_BRTRUE_S) {
7419 target = ip + 1 + (signed char)(*ip);
7424 target = ip + 4 + (gint)(read32 (ip));
7427 GET_BBLOCK (cfg, tblock, target);
7428 link_bblock (cfg, bblock, tblock);
7429 CHECK_BBLOCK (target, ip, tblock);
7430 ins->inst_target_bb = tblock;
7431 GET_BBLOCK (cfg, tblock, ip);
7433 * This leads to some inconsistency, since the two bblocks are not
7434 * really connected, but it is needed for handling stack arguments
7435 * correctly (see test_0_box_brtrue_opt_regress_81102).
7437 link_bblock (cfg, bblock, tblock);
7438 if (sp != stack_start) {
7439 handle_stack_args (cfg, stack_start, sp - stack_start);
7441 CHECK_UNVERIFIABLE (cfg);
7443 MONO_ADD_INS (bblock, ins);
7444 start_new_bblock = 1;
7453 EMIT_GET_RGCTX (rgctx, context_used);
7454 if (cfg->opt & MONO_OPT_SHARED)
7455 rgctx_info = MONO_RGCTX_INFO_KLASS;
7457 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7458 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7459 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, rgctx, data);
7461 *sp++ = handle_box (cfg, val, klass);
7469 MonoInst *rgctx = NULL;
7474 token = read32 (ip + 1);
7475 klass = mini_get_class (method, token, generic_context);
7476 CHECK_TYPELOAD (klass);
7478 mono_save_token_info (cfg, image, token, klass);
7480 if (cfg->generic_sharing_context)
7481 context_used = mono_class_check_context_used (klass);
7484 EMIT_GET_RGCTX (rgctx, context_used);
7486 if (mono_class_is_nullable (klass)) {
7489 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7490 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7494 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7504 MonoClassField *field;
7508 if (*ip == CEE_STFLD) {
7515 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7517 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7520 token = read32 (ip + 1);
7521 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7522 field = mono_method_get_wrapper_data (method, token);
7523 klass = field->parent;
7526 field = mono_field_from_token (image, token, &klass, generic_context);
7530 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7531 FIELD_ACCESS_FAILURE;
7532 mono_class_init (klass);
7534 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7535 if (*ip == CEE_STFLD) {
7536 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7538 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7539 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7540 MonoInst *iargs [5];
7543 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7544 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7545 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7549 if (cfg->opt & MONO_OPT_INLINE) {
7550 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7551 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7552 g_assert (costs > 0);
7555 cfg->real_offset += 5;
7558 inline_costs += costs;
7561 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7566 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7568 store->flags |= ins_flag;
7575 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7576 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7577 MonoInst *iargs [4];
7580 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7581 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7582 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7583 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7584 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7585 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7587 g_assert (costs > 0);
7590 cfg->real_offset += 5;
7594 inline_costs += costs;
7597 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7601 if (sp [0]->type == STACK_VTYPE) {
7604 /* Have to compute the address of the variable */
7606 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7608 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7610 g_assert (var->klass == klass);
7612 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7616 if (*ip == CEE_LDFLDA) {
7617 dreg = alloc_preg (cfg);
7619 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7620 ins->klass = mono_class_from_mono_type (field->type);
7621 ins->type = STACK_MP;
7626 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7627 load->flags |= ins_flag;
7638 MonoClassField *field;
7639 gpointer addr = NULL;
7640 gboolean is_special_static;
7643 token = read32 (ip + 1);
7645 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7646 field = mono_method_get_wrapper_data (method, token);
7647 klass = field->parent;
7650 field = mono_field_from_token (image, token, &klass, generic_context);
7653 mono_class_init (klass);
7654 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7655 FIELD_ACCESS_FAILURE;
7658 * We can only support shared generic static
7659 * field access on architectures where the
7660 * trampoline code has been extended to handle
7661 * the generic class init.
7663 #ifndef MONO_ARCH_VTABLE_REG
7664 GENERIC_SHARING_FAILURE (*ip);
7667 if (cfg->generic_sharing_context)
7668 context_used = mono_class_check_context_used (klass);
7670 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7672 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7673 * to be called here.
7675 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7676 mono_class_vtable (cfg->domain, klass);
7677 CHECK_TYPELOAD (klass);
7679 mono_domain_lock (cfg->domain);
7680 if (cfg->domain->special_static_fields)
7681 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7682 mono_domain_unlock (cfg->domain);
7684 is_special_static = mono_class_field_is_special_static (field);
7686 /* Generate IR to compute the field address */
7688 if ((cfg->opt & MONO_OPT_SHARED) ||
7689 (cfg->compile_aot && is_special_static) ||
7690 (context_used && is_special_static)) {
7691 MonoInst *iargs [2];
7693 g_assert (field->parent);
7694 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7698 EMIT_GET_RGCTX (rgctx, context_used);
7699 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7701 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7703 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7704 } else if (context_used) {
7705 MonoInst *rgctx, *static_data;
7708 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7709 method->klass->name_space, method->klass->name, method->name,
7710 depth, field->offset);
7713 if (mono_class_needs_cctor_run (klass, method)) {
7715 MonoInst *vtable, *rgctx;
7717 EMIT_GET_RGCTX (rgctx, context_used);
7718 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7720 // FIXME: This doesn't work since it tries to pass the argument
7721 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7723 * The vtable pointer is always passed in a register regardless of
7724 * the calling convention, so assign it manually, and make a call
7725 * using a signature without parameters.
7727 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7728 #ifdef MONO_ARCH_VTABLE_REG
7729 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7730 cfg->uses_vtable_reg = TRUE;
7737 * The pointer we're computing here is
7739 * super_info.static_data + field->offset
7741 EMIT_GET_RGCTX (rgctx, context_used);
7742 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7744 if (field->offset == 0) {
7747 int addr_reg = mono_alloc_preg (cfg);
7748 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7750 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7751 MonoInst *iargs [2];
7753 g_assert (field->parent);
7754 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7755 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7756 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7758 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7760 CHECK_TYPELOAD (klass);
7762 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7763 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7764 if (cfg->verbose_level > 2)
7765 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7766 class_inits = g_slist_prepend (class_inits, vtable);
7768 if (cfg->run_cctors) {
7770 /* This makes it so that inlining cannot trigger */
7771 /* .cctors: too many apps depend on them */
7772 /* running with a specific order... */
7773 if (! vtable->initialized)
7775 ex = mono_runtime_class_init_full (vtable, FALSE);
7777 set_exception_object (cfg, ex);
7778 goto exception_exit;
7782 addr = (char*)vtable->data + field->offset;
7784 if (cfg->compile_aot)
7785 EMIT_NEW_SFLDACONST (cfg, ins, field);
7787 EMIT_NEW_PCONST (cfg, ins, addr);
7790 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7791 * This could be later optimized to do just a couple of
7792 * memory dereferences with constant offsets.
7794 MonoInst *iargs [1];
7795 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7796 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7800 /* Generate IR to do the actual load/store operation */
7802 if (*ip == CEE_LDSFLDA) {
7803 ins->klass = mono_class_from_mono_type (field->type);
7805 } else if (*ip == CEE_STSFLD) {
7810 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7811 store->flags |= ins_flag;
7814 gboolean is_const = FALSE;
7815 MonoVTable *vtable = NULL;
7817 if (!context_used) {
7818 vtable = mono_class_vtable (cfg->domain, klass);
7819 CHECK_TYPELOAD (klass);
7821 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7822 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7823 gpointer addr = (char*)vtable->data + field->offset;
7824 int ro_type = field->type->type;
7825 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7826 ro_type = field->type->data.klass->enum_basetype->type;
7828 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7831 case MONO_TYPE_BOOLEAN:
7833 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7837 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7840 case MONO_TYPE_CHAR:
7842 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7846 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7851 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7855 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7860 case MONO_TYPE_STRING:
7861 case MONO_TYPE_OBJECT:
7862 case MONO_TYPE_CLASS:
7863 case MONO_TYPE_SZARRAY:
7865 case MONO_TYPE_FNPTR:
7866 case MONO_TYPE_ARRAY:
7867 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7868 type_to_eval_stack_type ((cfg), field->type, *sp);
7873 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7878 case MONO_TYPE_VALUETYPE:
7888 CHECK_STACK_OVF (1);
7890 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7891 load->flags |= ins_flag;
7903 token = read32 (ip + 1);
7904 klass = mini_get_class (method, token, generic_context);
7905 CHECK_TYPELOAD (klass);
7906 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7907 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7918 const char *data_ptr;
7925 token = read32 (ip + 1);
7927 klass = mini_get_class (method, token, generic_context);
7928 CHECK_TYPELOAD (klass);
7930 if (cfg->generic_sharing_context)
7931 context_used = mono_class_check_context_used (klass);
7937 /* FIXME: Decompose later to help abcrem */
7940 EMIT_GET_RGCTX (rgctx, context_used);
7941 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7946 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7948 if (cfg->opt & MONO_OPT_SHARED) {
7949 /* Decompose now to avoid problems with references to the domainvar */
7950 MonoInst *iargs [3];
7952 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7953 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7956 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7958 /* Decompose later since it is needed by abcrem */
7959 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7960 ins->dreg = alloc_preg (cfg);
7961 ins->sreg1 = sp [0]->dreg;
7962 ins->inst_newa_class = klass;
7963 ins->type = STACK_OBJ;
7965 MONO_ADD_INS (cfg->cbb, ins);
7966 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7967 cfg->cbb->has_array_access = TRUE;
7969 /* Needed so mono_emit_load_get_addr () gets called */
7970 mono_get_got_var (cfg);
7980 * we inline/optimize the initialization sequence if possible.
7981 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7982 * for small sizes open code the memcpy
7983 * ensure the rva field is big enough
7985 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7986 MonoMethod *memcpy_method = get_memcpy_method ();
7987 MonoInst *iargs [3];
7988 int add_reg = alloc_preg (cfg);
7990 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7991 if (cfg->compile_aot) {
7992 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7994 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7996 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7997 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8006 if (sp [0]->type != STACK_OBJ)
8009 dreg = alloc_preg (cfg);
8010 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8011 ins->dreg = alloc_preg (cfg);
8012 ins->sreg1 = sp [0]->dreg;
8013 ins->type = STACK_I4;
8014 MONO_ADD_INS (cfg->cbb, ins);
8015 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8016 cfg->cbb->has_array_access = TRUE;
8024 if (sp [0]->type != STACK_OBJ)
8027 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8029 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8030 CHECK_TYPELOAD (klass);
8031 /* we need to make sure that this array is exactly the type it needs
8032 * to be for correctness. the wrappers are lax with their usage
8033 * so we need to ignore them here
8035 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8036 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8039 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8043 case CEE_LDELEM_ANY:
8054 case CEE_LDELEM_REF: {
8060 if (*ip == CEE_LDELEM_ANY) {
8062 token = read32 (ip + 1);
8063 klass = mini_get_class (method, token, generic_context);
8064 CHECK_TYPELOAD (klass);
8065 mono_class_init (klass);
8068 klass = array_access_to_klass (*ip);
8070 if (sp [0]->type != STACK_OBJ)
8073 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8075 if (sp [1]->opcode == OP_ICONST) {
8076 int array_reg = sp [0]->dreg;
8077 int index_reg = sp [1]->dreg;
8078 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8080 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8081 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8083 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8084 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8087 if (*ip == CEE_LDELEM_ANY)
8100 case CEE_STELEM_REF:
8101 case CEE_STELEM_ANY: {
8107 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8109 if (*ip == CEE_STELEM_ANY) {
8111 token = read32 (ip + 1);
8112 klass = mini_get_class (method, token, generic_context);
8113 CHECK_TYPELOAD (klass);
8114 mono_class_init (klass);
8117 klass = array_access_to_klass (*ip);
8119 if (sp [0]->type != STACK_OBJ)
8122 /* storing a NULL doesn't need any of the complex checks in stelemref */
8123 if (generic_class_is_reference_type (cfg, klass) &&
8124 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8125 MonoMethod* helper = mono_marshal_get_stelemref ();
8126 MonoInst *iargs [3];
8128 if (sp [0]->type != STACK_OBJ)
8130 if (sp [2]->type != STACK_OBJ)
8137 mono_emit_method_call (cfg, helper, iargs, NULL);
8139 if (sp [1]->opcode == OP_ICONST) {
8140 int array_reg = sp [0]->dreg;
8141 int index_reg = sp [1]->dreg;
8142 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8144 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8145 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8147 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8148 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8152 if (*ip == CEE_STELEM_ANY)
8159 case CEE_CKFINITE: {
8163 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8164 ins->sreg1 = sp [0]->dreg;
8165 ins->dreg = alloc_freg (cfg);
8166 ins->type = STACK_R8;
8167 MONO_ADD_INS (bblock, ins);
8170 mono_decompose_opcode (cfg, ins);
8175 case CEE_REFANYVAL: {
8176 MonoInst *src_var, *src;
8178 int klass_reg = alloc_preg (cfg);
8179 int dreg = alloc_preg (cfg);
8182 MONO_INST_NEW (cfg, ins, *ip);
8185 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8186 CHECK_TYPELOAD (klass);
8187 mono_class_init (klass);
8189 if (cfg->generic_sharing_context)
8190 context_used = mono_class_check_context_used (klass);
8193 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8195 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8196 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8197 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8200 MonoInst *rgctx, *klass_ins;
8202 EMIT_GET_RGCTX (rgctx, context_used);
8203 klass_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8206 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8207 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8209 mini_emit_class_check (cfg, klass_reg, klass);
8211 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8212 ins->type = STACK_MP;
8217 case CEE_MKREFANY: {
8218 MonoInst *loc, *addr;
8221 MONO_INST_NEW (cfg, ins, *ip);
8224 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8225 CHECK_TYPELOAD (klass);
8226 mono_class_init (klass);
8228 if (cfg->generic_sharing_context)
8229 context_used = mono_class_check_context_used (klass);
8231 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8232 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8235 MonoInst *rgctx, *const_ins;
8236 int type_reg = alloc_preg (cfg);
8238 EMIT_GET_RGCTX (rgctx, context_used);
8239 const_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8242 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8243 } else if (cfg->compile_aot) {
8244 int const_reg = alloc_preg (cfg);
8245 int type_reg = alloc_preg (cfg);
8247 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8248 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8250 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8252 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8253 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8255 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8257 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8258 ins->type = STACK_VTYPE;
8259 ins->klass = mono_defaults.typed_reference_class;
8266 MonoClass *handle_class;
8268 CHECK_STACK_OVF (1);
8271 n = read32 (ip + 1);
8273 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8274 handle = mono_method_get_wrapper_data (method, n);
8275 handle_class = mono_method_get_wrapper_data (method, n + 1);
8276 if (handle_class == mono_defaults.typehandle_class)
8277 handle = &((MonoClass*)handle)->byval_arg;
8280 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8284 mono_class_init (handle_class);
8285 if (cfg->generic_sharing_context) {
8286 if (handle_class == mono_defaults.typehandle_class) {
8287 /* If we get a MONO_TYPE_CLASS
8288 then we need to provide the
8290 instantiation of it. */
8291 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8294 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8295 } else if (handle_class == mono_defaults.fieldhandle_class)
8296 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8297 else if (handle_class == mono_defaults.methodhandle_class)
8298 context_used = mono_method_check_context_used (handle);
8300 g_assert_not_reached ();
8303 if (cfg->opt & MONO_OPT_SHARED) {
8304 MonoInst *addr, *vtvar, *iargs [3];
8305 int method_context_used;
8307 if (cfg->generic_sharing_context)
8308 method_context_used = mono_method_check_context_used (method);
8310 method_context_used = 0;
8312 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8314 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8315 EMIT_NEW_ICONST (cfg, iargs [1], n);
8316 if (method_context_used) {
8319 EMIT_GET_RGCTX (rgctx, method_context_used);
8320 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8321 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8323 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8324 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8326 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8328 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8330 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8332 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8333 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8334 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8335 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8336 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8337 MonoClass *tclass = mono_class_from_mono_type (handle);
8339 mono_class_init (tclass);
8343 EMIT_GET_RGCTX (rgctx, context_used);
8344 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8345 } else if (cfg->compile_aot) {
8346 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8348 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8350 ins->type = STACK_OBJ;
8351 ins->klass = cmethod->klass;
8354 MonoInst *addr, *vtvar;
8356 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8361 EMIT_GET_RGCTX (rgctx, context_used);
8362 if (handle_class == mono_defaults.typehandle_class) {
8363 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8364 mono_class_from_mono_type (handle),
8365 MONO_RGCTX_INFO_TYPE);
8366 } else if (handle_class == mono_defaults.methodhandle_class) {
8367 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8368 handle, MONO_RGCTX_INFO_METHOD);
8369 } else if (handle_class == mono_defaults.fieldhandle_class) {
8370 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8371 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8373 g_assert_not_reached ();
8375 } else if (cfg->compile_aot) {
8376 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8378 EMIT_NEW_PCONST (cfg, ins, handle);
8380 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8381 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8382 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8392 MONO_INST_NEW (cfg, ins, OP_THROW);
8394 ins->sreg1 = sp [0]->dreg;
8396 bblock->out_of_line = TRUE;
8397 MONO_ADD_INS (bblock, ins);
8398 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8399 MONO_ADD_INS (bblock, ins);
8402 link_bblock (cfg, bblock, end_bblock);
8403 start_new_bblock = 1;
8405 case CEE_ENDFINALLY:
8406 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8407 MONO_ADD_INS (bblock, ins);
8409 start_new_bblock = 1;
8412 * Control will leave the method so empty the stack, otherwise
8413 * the next basic block will start with a nonempty stack.
8415 while (sp != stack_start) {
8423 if (*ip == CEE_LEAVE) {
8425 target = ip + 5 + (gint32)read32(ip + 1);
8428 target = ip + 2 + (signed char)(ip [1]);
8431 /* empty the stack */
8432 while (sp != stack_start) {
8437 * If this leave statement is in a catch block, check for a
8438 * pending exception, and rethrow it if necessary.
8440 for (i = 0; i < header->num_clauses; ++i) {
8441 MonoExceptionClause *clause = &header->clauses [i];
8444 * Use <= in the final comparison to handle clauses with multiple
8445 * leave statements, like in bug #78024.
8446 * The ordering of the exception clauses guarantees that we find the
8449 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8451 MonoBasicBlock *dont_throw;
8456 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8459 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8461 NEW_BBLOCK (cfg, dont_throw);
8464 * Currently, we always rethrow the abort exception, despite the
8465 * fact that this is not correct. See thread6.cs for an example.
8466 * But propagating the abort exception is more important than
8467 * getting the semantics right.
8469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8471 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8473 MONO_START_BB (cfg, dont_throw);
8478 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8480 for (tmp = handlers; tmp; tmp = tmp->next) {
8482 link_bblock (cfg, bblock, tblock);
8483 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8484 ins->inst_target_bb = tblock;
8485 MONO_ADD_INS (bblock, ins);
8487 g_list_free (handlers);
8490 MONO_INST_NEW (cfg, ins, OP_BR);
8491 MONO_ADD_INS (bblock, ins);
8492 GET_BBLOCK (cfg, tblock, target);
8493 link_bblock (cfg, bblock, tblock);
8494 CHECK_BBLOCK (target, ip, tblock);
8495 ins->inst_target_bb = tblock;
8496 start_new_bblock = 1;
8498 if (*ip == CEE_LEAVE)
8507 * Mono specific opcodes
8509 case MONO_CUSTOM_PREFIX: {
8511 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8515 case CEE_MONO_ICALL: {
8517 MonoJitICallInfo *info;
8519 token = read32 (ip + 2);
8520 func = mono_method_get_wrapper_data (method, token);
8521 info = mono_find_jit_icall_by_addr (func);
8524 CHECK_STACK (info->sig->param_count);
8525 sp -= info->sig->param_count;
8527 ins = mono_emit_jit_icall (cfg, info->func, sp);
8528 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8532 inline_costs += 10 * num_calls++;
8536 case CEE_MONO_LDPTR: {
8539 CHECK_STACK_OVF (1);
8541 token = read32 (ip + 2);
8543 ptr = mono_method_get_wrapper_data (method, token);
8544 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8545 MonoJitICallInfo *callinfo;
8546 const char *icall_name;
8548 icall_name = method->name + strlen ("__icall_wrapper_");
8549 g_assert (icall_name);
8550 callinfo = mono_find_jit_icall_by_name (icall_name);
8551 g_assert (callinfo);
8553 if (ptr == callinfo->func) {
8554 /* Will be transformed into an AOTCONST later */
8555 EMIT_NEW_PCONST (cfg, ins, ptr);
8561 /* FIXME: Generalize this */
8562 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8563 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8568 EMIT_NEW_PCONST (cfg, ins, ptr);
8571 inline_costs += 10 * num_calls++;
8572 /* Can't embed random pointers into AOT code */
8573 cfg->disable_aot = 1;
8576 case CEE_MONO_ICALL_ADDR: {
8577 MonoMethod *cmethod;
8580 CHECK_STACK_OVF (1);
8582 token = read32 (ip + 2);
8584 cmethod = mono_method_get_wrapper_data (method, token);
8586 if (cfg->compile_aot) {
8587 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8589 ptr = mono_lookup_internal_call (cmethod);
8591 EMIT_NEW_PCONST (cfg, ins, ptr);
8597 case CEE_MONO_VTADDR: {
8598 MonoInst *src_var, *src;
8604 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8605 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8610 case CEE_MONO_NEWOBJ: {
8611 MonoInst *iargs [2];
8613 CHECK_STACK_OVF (1);
8615 token = read32 (ip + 2);
8616 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8617 mono_class_init (klass);
8618 NEW_DOMAINCONST (cfg, iargs [0]);
8619 MONO_ADD_INS (cfg->cbb, iargs [0]);
8620 NEW_CLASSCONST (cfg, iargs [1], klass);
8621 MONO_ADD_INS (cfg->cbb, iargs [1]);
8622 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8624 inline_costs += 10 * num_calls++;
8627 case CEE_MONO_OBJADDR:
8630 MONO_INST_NEW (cfg, ins, OP_MOVE);
8631 ins->dreg = alloc_preg (cfg);
8632 ins->sreg1 = sp [0]->dreg;
8633 ins->type = STACK_MP;
8634 MONO_ADD_INS (cfg->cbb, ins);
8638 case CEE_MONO_LDNATIVEOBJ:
8640 * Similar to LDOBJ, but instead load the unmanaged
8641 * representation of the vtype to the stack.
8646 token = read32 (ip + 2);
8647 klass = mono_method_get_wrapper_data (method, token);
8648 g_assert (klass->valuetype);
8649 mono_class_init (klass);
8652 MonoInst *src, *dest, *temp;
8655 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8656 temp->backend.is_pinvoke = 1;
8657 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8658 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8660 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8661 dest->type = STACK_VTYPE;
8662 dest->klass = klass;
8668 case CEE_MONO_RETOBJ: {
8670 * Same as RET, but return the native representation of a vtype
8673 g_assert (cfg->ret);
8674 g_assert (mono_method_signature (method)->pinvoke);
8679 token = read32 (ip + 2);
8680 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8682 if (!cfg->vret_addr) {
8683 g_assert (cfg->ret_var_is_local);
8685 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8687 EMIT_NEW_RETLOADA (cfg, ins);
8689 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8691 if (sp != stack_start)
8694 MONO_INST_NEW (cfg, ins, OP_BR);
8695 ins->inst_target_bb = end_bblock;
8696 MONO_ADD_INS (bblock, ins);
8697 link_bblock (cfg, bblock, end_bblock);
8698 start_new_bblock = 1;
8702 case CEE_MONO_CISINST:
8703 case CEE_MONO_CCASTCLASS: {
8708 token = read32 (ip + 2);
8709 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8710 if (ip [1] == CEE_MONO_CISINST)
8711 ins = handle_cisinst (cfg, klass, sp [0]);
8713 ins = handle_ccastclass (cfg, klass, sp [0]);
8719 case CEE_MONO_SAVE_LMF:
8720 case CEE_MONO_RESTORE_LMF:
8721 #ifdef MONO_ARCH_HAVE_LMF_OPS
8722 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8723 MONO_ADD_INS (bblock, ins);
8724 cfg->need_lmf_area = TRUE;
8728 case CEE_MONO_CLASSCONST:
8729 CHECK_STACK_OVF (1);
8731 token = read32 (ip + 2);
8732 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8735 inline_costs += 10 * num_calls++;
8737 case CEE_MONO_NOT_TAKEN:
8738 bblock->out_of_line = TRUE;
8742 CHECK_STACK_OVF (1);
8744 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8745 ins->dreg = alloc_preg (cfg);
8746 ins->inst_offset = (gint32)read32 (ip + 2);
8747 ins->type = STACK_PTR;
8748 MONO_ADD_INS (bblock, ins);
8753 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8763 /* somewhat similar to LDTOKEN */
8764 MonoInst *addr, *vtvar;
8765 CHECK_STACK_OVF (1);
8766 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8768 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8769 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8771 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8772 ins->type = STACK_VTYPE;
8773 ins->klass = mono_defaults.argumenthandle_class;
8786 * The following transforms:
8787 * CEE_CEQ into OP_CEQ
8788 * CEE_CGT into OP_CGT
8789 * CEE_CGT_UN into OP_CGT_UN
8790 * CEE_CLT into OP_CLT
8791 * CEE_CLT_UN into OP_CLT_UN
8793 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8795 MONO_INST_NEW (cfg, ins, cmp->opcode);
8797 cmp->sreg1 = sp [0]->dreg;
8798 cmp->sreg2 = sp [1]->dreg;
8799 type_from_op (cmp, sp [0], sp [1]);
8801 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8802 cmp->opcode = OP_LCOMPARE;
8803 else if (sp [0]->type == STACK_R8)
8804 cmp->opcode = OP_FCOMPARE;
8806 cmp->opcode = OP_ICOMPARE;
8807 MONO_ADD_INS (bblock, cmp);
8808 ins->type = STACK_I4;
8809 ins->dreg = alloc_dreg (cfg, ins->type);
8810 type_from_op (ins, sp [0], sp [1]);
8812 if (cmp->opcode == OP_FCOMPARE) {
8814 * The backends expect the fceq opcodes to do the
8817 cmp->opcode = OP_NOP;
8818 ins->sreg1 = cmp->sreg1;
8819 ins->sreg2 = cmp->sreg2;
8821 MONO_ADD_INS (bblock, ins);
8828 MonoMethod *cil_method, *ctor_method;
8829 gboolean is_shared = FALSE;
8831 CHECK_STACK_OVF (1);
8833 n = read32 (ip + 2);
8834 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8837 mono_class_init (cmethod->klass);
8839 mono_save_token_info (cfg, image, n, cmethod);
8841 if (cfg->generic_sharing_context)
8842 context_used = mono_method_check_context_used (cmethod);
8844 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8845 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8846 (cmethod->klass->generic_class ||
8847 cmethod->klass->generic_container)) {
8850 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8854 cil_method = cmethod;
8855 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8856 METHOD_ACCESS_FAILURE;
8858 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8859 if (check_linkdemand (cfg, method, cmethod))
8861 CHECK_CFG_EXCEPTION;
8862 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8863 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8867 * Optimize the common case of ldftn+delegate creation
8869 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8870 /* FIXME: SGEN support */
8871 /* FIXME: handle shared static generic methods */
8872 /* FIXME: handle this in shared code */
8873 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8874 MonoInst *target_ins;
8877 if (cfg->verbose_level > 3)
8878 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8879 target_ins = sp [-1];
8881 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8892 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8894 EMIT_GET_RGCTX (rgctx, context_used);
8895 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8896 } else if (is_shared) {
8897 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8899 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8901 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8905 inline_costs += 10 * num_calls++;
8908 case CEE_LDVIRTFTN: {
8913 n = read32 (ip + 2);
8914 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8917 mono_class_init (cmethod->klass);
8919 if (cfg->generic_sharing_context)
8920 context_used = mono_method_check_context_used (cmethod);
8922 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8923 if (check_linkdemand (cfg, method, cmethod))
8925 CHECK_CFG_EXCEPTION;
8926 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8927 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8936 EMIT_GET_RGCTX (rgctx, context_used);
8937 args [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8938 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8940 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8941 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8945 inline_costs += 10 * num_calls++;
8949 CHECK_STACK_OVF (1);
8951 n = read16 (ip + 2);
8953 EMIT_NEW_ARGLOAD (cfg, ins, n);
8958 CHECK_STACK_OVF (1);
8960 n = read16 (ip + 2);
8962 NEW_ARGLOADA (cfg, ins, n);
8963 MONO_ADD_INS (cfg->cbb, ins);
8971 n = read16 (ip + 2);
8973 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8975 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8979 CHECK_STACK_OVF (1);
8981 n = read16 (ip + 2);
8983 EMIT_NEW_LOCLOAD (cfg, ins, n);
8988 CHECK_STACK_OVF (1);
8990 n = read16 (ip + 2);
8992 EMIT_NEW_LOCLOADA (cfg, ins, n);
9000 n = read16 (ip + 2);
9002 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9004 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
9011 if (sp != stack_start)
9013 if (cfg->method != method)
9015 * Inlining this into a loop in a parent could lead to
9016 * stack overflows which is different behavior than the
9017 * non-inlined case, thus disable inlining in this case.
9019 goto inline_failure;
9021 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9022 ins->dreg = alloc_preg (cfg);
9023 ins->sreg1 = sp [0]->dreg;
9024 ins->type = STACK_PTR;
9025 MONO_ADD_INS (cfg->cbb, ins);
9027 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9028 if (header->init_locals)
9029 ins->flags |= MONO_INST_INIT;
9034 case CEE_ENDFILTER: {
9035 MonoExceptionClause *clause, *nearest;
9036 int cc, nearest_num;
9040 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9042 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9043 ins->sreg1 = (*sp)->dreg;
9044 MONO_ADD_INS (bblock, ins);
9045 start_new_bblock = 1;
9050 for (cc = 0; cc < header->num_clauses; ++cc) {
9051 clause = &header->clauses [cc];
9052 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9053 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9054 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9060 if ((ip - header->code) != nearest->handler_offset)
9065 case CEE_UNALIGNED_:
9066 ins_flag |= MONO_INST_UNALIGNED;
9067 /* FIXME: record alignment? we can assume 1 for now */
9072 ins_flag |= MONO_INST_VOLATILE;
9076 ins_flag |= MONO_INST_TAILCALL;
9077 cfg->flags |= MONO_CFG_HAS_TAIL;
9078 /* Can't inline tail calls at this time */
9079 inline_costs += 100000;
9086 token = read32 (ip + 2);
9087 klass = mini_get_class (method, token, generic_context);
9088 CHECK_TYPELOAD (klass);
9089 if (generic_class_is_reference_type (cfg, klass)) {
9090 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9092 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9093 mini_emit_initobj (cfg, *sp, NULL, klass);
9098 case CEE_CONSTRAINED_:
9100 token = read32 (ip + 2);
9101 constrained_call = mono_class_get_full (image, token, generic_context);
9102 CHECK_TYPELOAD (constrained_call);
9107 MonoInst *iargs [3];
9111 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9112 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9113 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9114 /* emit_memset only works when val == 0 */
9115 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9120 if (ip [1] == CEE_CPBLK) {
9121 MonoMethod *memcpy_method = get_memcpy_method ();
9122 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9124 MonoMethod *memset_method = get_memset_method ();
9125 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9135 ins_flag |= MONO_INST_NOTYPECHECK;
9137 ins_flag |= MONO_INST_NORANGECHECK;
9138 /* we ignore the no-nullcheck for now since we
9139 * really do it explicitly only when doing callvirt->call
9145 int handler_offset = -1;
9147 for (i = 0; i < header->num_clauses; ++i) {
9148 MonoExceptionClause *clause = &header->clauses [i];
9149 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9150 handler_offset = clause->handler_offset;
9155 bblock->flags |= BB_EXCEPTION_UNSAFE;
9157 g_assert (handler_offset != -1);
9159 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9160 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9161 ins->sreg1 = load->dreg;
9162 MONO_ADD_INS (bblock, ins);
9164 link_bblock (cfg, bblock, end_bblock);
9165 start_new_bblock = 1;
9173 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9175 CHECK_STACK_OVF (1);
9177 token = read32 (ip + 2);
9178 /* FIXME: handle generics. */
9179 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9180 MonoType *type = mono_type_create_from_typespec (image, token);
9181 token = mono_type_size (type, &ialign);
9183 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9184 CHECK_TYPELOAD (klass);
9185 mono_class_init (klass);
9186 token = mono_class_value_size (klass, &align);
9188 EMIT_NEW_ICONST (cfg, ins, token);
9193 case CEE_REFANYTYPE: {
9194 MonoInst *src_var, *src;
9200 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9202 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9203 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9204 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9214 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9219 g_error ("opcode 0x%02x not handled", *ip);
9222 if (start_new_bblock != 1)
9225 bblock->cil_length = ip - bblock->cil_code;
9226 bblock->next_bb = end_bblock;
9228 if (cfg->method == method && cfg->domainvar) {
9230 MonoInst *get_domain;
9232 cfg->cbb = init_localsbb;
9234 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9235 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9238 get_domain->dreg = alloc_preg (cfg);
9239 MONO_ADD_INS (cfg->cbb, get_domain);
9241 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9242 MONO_ADD_INS (cfg->cbb, store);
9245 if (cfg->method == method && cfg->got_var)
9246 mono_emit_load_got_addr (cfg);
9248 if (header->init_locals) {
9251 cfg->cbb = init_localsbb;
9252 cfg->ip = header->code;
9253 for (i = 0; i < header->num_locals; ++i) {
9254 MonoType *ptype = header->locals [i];
9255 int t = ptype->type;
9256 dreg = cfg->locals [i]->dreg;
9258 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9259 t = ptype->data.klass->enum_basetype->type;
9261 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9262 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9263 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9264 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9265 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9266 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9267 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9268 ins->type = STACK_R8;
9269 ins->inst_p0 = (void*)&r8_0;
9270 ins->dreg = alloc_dreg (cfg, STACK_R8);
9271 MONO_ADD_INS (init_localsbb, ins);
9272 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9273 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9274 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9275 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9277 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9284 /* resolve backward branches in the middle of an existing basic block */
9285 for (tmp = bb_recheck; tmp; tmp = tmp->next) {
9287 /*printf ("need recheck in %s at IL_%04x\n", method->name, bblock->cil_code - header->code);*/
9288 tblock = find_previous (cfg->cil_offset_to_bb, header->code_size, start_bblock, bblock->cil_code);
9289 if (tblock != start_bblock) {
9291 split_bblock (cfg, tblock, bblock);
9292 l = bblock->cil_code - header->code;
9293 bblock->cil_length = tblock->cil_length - l;
9294 tblock->cil_length = l;
9296 printf ("recheck failed.\n");
9300 if (cfg->method == method) {
9302 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9303 bb->region = mono_find_block_region (cfg, bb->real_offset);
9305 mono_create_spvar_for_region (cfg, bb->region);
9306 if (cfg->verbose_level > 2)
9307 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9311 g_slist_free (class_inits);
9312 dont_inline = g_list_remove (dont_inline, method);
9314 if (inline_costs < 0) {
9317 /* Method is too large */
9318 mname = mono_method_full_name (method, TRUE);
9319 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9320 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9325 if ((cfg->verbose_level > 1) && (cfg->method == method))
9326 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9328 return inline_costs;
9331 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9332 g_slist_free (class_inits);
9333 dont_inline = g_list_remove (dont_inline, method);
9337 g_slist_free (class_inits);
9338 dont_inline = g_list_remove (dont_inline, method);
9342 g_slist_free (class_inits);
9343 dont_inline = g_list_remove (dont_inline, method);
9344 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9348 g_slist_free (class_inits);
9349 dont_inline = g_list_remove (dont_inline, method);
9350 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode (store a register to [base + offset])
 * to the corresponding STORE*_MEMBASE_IMM opcode, so that a store whose
 * source value is a known constant can carry it as an immediate instead of
 * going through a register. Asserts on opcodes with no immediate form.
 */
9355 store_membase_reg_to_store_membase_imm
9358 case OP_STORE_MEMBASE_REG:
9359 return OP_STORE_MEMBASE_IMM;
9360 case OP_STOREI1_MEMBASE_REG:
9361 return OP_STOREI1_MEMBASE_IMM;
9362 case OP_STOREI2_MEMBASE_REG:
9363 return OP_STOREI2_MEMBASE_IMM;
9364 case OP_STOREI4_MEMBASE_REG:
9365 return OP_STOREI4_MEMBASE_IMM;
9366 case OP_STOREI8_MEMBASE_REG:
9367 return OP_STOREI8_MEMBASE_IMM;
9369 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an ALU/compare/store/push opcode to the variant which takes its
 * second operand as an immediate (OP_FOO -> OP_FOO_IMM).
 * NOTE(review): most of the case labels for the early returns are not
 * visible in this excerpt; the visible returns follow the uniform
 * _IMM-suffix naming pattern.
 */
9376 mono_op_to_op_imm (int opcode)
9386 return OP_IDIV_UN_IMM;
9390 return OP_IREM_UN_IMM;
9404 return OP_ISHR_UN_IMM;
9421 return OP_LSHR_UN_IMM;
9424 return OP_COMPARE_IMM;
9426 return OP_ICOMPARE_IMM;
9428 return OP_LCOMPARE_IMM;
9430 case OP_STORE_MEMBASE_REG:
9431 return OP_STORE_MEMBASE_IMM;
9432 case OP_STOREI1_MEMBASE_REG:
9433 return OP_STOREI1_MEMBASE_IMM;
9434 case OP_STOREI2_MEMBASE_REG:
9435 return OP_STOREI2_MEMBASE_IMM;
9436 case OP_STOREI4_MEMBASE_REG:
9437 return OP_STOREI4_MEMBASE_IMM;
/* x86-family targets additionally accept immediates in push/compare-to-memory forms */
9439 #if defined(__i386__) || defined (__x86_64__)
9441 return OP_X86_PUSH_IMM;
9442 case OP_X86_COMPARE_MEMBASE_REG:
9443 return OP_X86_COMPARE_MEMBASE_IMM;
9445 #if defined(__x86_64__)
9446 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9447 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9449 case OP_VOIDCALL_REG:
9458 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode to the IR load-from-[base + offset]
 * opcode with the matching width and signedness. Asserts on opcodes it
 * does not know.
 * NOTE(review): the CEE_LDIND_* case labels are elided in this excerpt;
 * the pairing follows from the returned OP_LOAD*_MEMBASE widths.
 */
9465 ldind_to_load_membase (int opcode)
9469 return OP_LOADI1_MEMBASE;
9471 return OP_LOADU1_MEMBASE;
9473 return OP_LOADI2_MEMBASE;
9475 return OP_LOADU2_MEMBASE;
9477 return OP_LOADI4_MEMBASE;
9479 return OP_LOADU4_MEMBASE;
9481 return OP_LOAD_MEMBASE;
9483 return OP_LOAD_MEMBASE;
9485 return OP_LOADI8_MEMBASE;
9487 return OP_LOADR4_MEMBASE;
9489 return OP_LOADR8_MEMBASE;
9491 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode to the IR store-register-to-
 * [base + offset] opcode with the matching width. Asserts on opcodes it
 * does not know.
 * NOTE(review): the CEE_STIND_* case labels are elided in this excerpt.
 */
9498 stind_to_store_membase (int opcode)
9502 return OP_STOREI1_MEMBASE_REG;
9504 return OP_STOREI2_MEMBASE_REG;
9506 return OP_STOREI4_MEMBASE_REG;
9509 return OP_STORE_MEMBASE_REG;
9511 return OP_STOREI8_MEMBASE_REG;
9513 return OP_STORER4_MEMBASE_REG;
9515 return OP_STORER8_MEMBASE_REG;
9517 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode (load from [base + offset]) to the
 * corresponding LOAD*_MEM opcode (load from an absolute address), on the
 * targets whose addressing modes support it — currently only x86/amd64,
 * as the FIXME below notes this should become an arch capability macro.
 */
9524 mono_load_membase_to_load_mem (int opcode)
9526 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9527 #if defined(__i386__) || defined(__x86_64__)
9529 case OP_LOAD_MEMBASE:
9531 case OP_LOADU1_MEMBASE:
9532 return OP_LOADU1_MEM;
9533 case OP_LOADU2_MEMBASE:
9534 return OP_LOADU2_MEM;
9535 case OP_LOADI4_MEMBASE:
9536 return OP_LOADI4_MEM;
9537 case OP_LOADU4_MEMBASE:
9538 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64-bit targets */
9539 #if SIZEOF_VOID_P == 8
9540 case OP_LOADI8_MEMBASE:
9541 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU result is consumed only by a following store to memory,
 * x86-family targets can perform the operation directly on the memory
 * destination (read-modify-write). Map the ALU opcode to the matching
 * *_MEMBASE_REG / *_MEMBASE_IMM opcode; STORE_OPCODE is the store that
 * would have consumed the result, and only full-word stores (plus 8-byte
 * stores on amd64) qualify.
 * NOTE(review): the ALU case labels are elided in this excerpt; the
 * visible returns cover add/sub/and/or/xor in reg and imm forms.
 */
9550 op_to_op_dest_membase (int store_opcode, int opcode)
9552 #if defined(__i386__)
9553 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9558 return OP_X86_ADD_MEMBASE_REG;
9560 return OP_X86_SUB_MEMBASE_REG;
9562 return OP_X86_AND_MEMBASE_REG;
9564 return OP_X86_OR_MEMBASE_REG;
9566 return OP_X86_XOR_MEMBASE_REG;
9569 return OP_X86_ADD_MEMBASE_IMM;
9572 return OP_X86_SUB_MEMBASE_IMM;
9575 return OP_X86_AND_MEMBASE_IMM;
9578 return OP_X86_OR_MEMBASE_IMM;
9581 return OP_X86_XOR_MEMBASE_IMM;
/* amd64 additionally accepts 8-byte stores and has 64-bit AMD64_* forms */
9587 #if defined(__x86_64__)
9588 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9593 return OP_X86_ADD_MEMBASE_REG;
9595 return OP_X86_SUB_MEMBASE_REG;
9597 return OP_X86_AND_MEMBASE_REG;
9599 return OP_X86_OR_MEMBASE_REG;
9601 return OP_X86_XOR_MEMBASE_REG;
9603 return OP_X86_ADD_MEMBASE_IMM;
9605 return OP_X86_SUB_MEMBASE_IMM;
9607 return OP_X86_AND_MEMBASE_IMM;
9609 return OP_X86_OR_MEMBASE_IMM;
9611 return OP_X86_XOR_MEMBASE_IMM;
9613 return OP_AMD64_ADD_MEMBASE_REG;
9615 return OP_AMD64_SUB_MEMBASE_REG;
9617 return OP_AMD64_AND_MEMBASE_REG;
9619 return OP_AMD64_OR_MEMBASE_REG;
9621 return OP_AMD64_XOR_MEMBASE_REG;
9624 return OP_AMD64_ADD_MEMBASE_IMM;
9627 return OP_AMD64_SUB_MEMBASE_IMM;
9630 return OP_AMD64_AND_MEMBASE_IMM;
9633 return OP_AMD64_OR_MEMBASE_IMM;
9636 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result-producing opcode with a following one-byte store
 * into a single x86 SETcc-to-memory opcode. The visible arms yield
 * OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE when the consuming store is
 * OP_STOREI1_MEMBASE_REG.
 * NOTE(review): the case labels selecting between the two arms are elided
 * in this excerpt — presumably the eq/ne condition opcodes; confirm
 * against the full source.
 */
9646 op_to_op_store_membase (int store_opcode, int opcode)
9648 #if defined(__i386__) || defined(__x86_64__)
9651 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9652 return OP_X86_SETEQ_MEMBASE;
9654 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9655 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When the first source operand (sreg1) of OPCODE is produced by a load,
 * fold the load into the instruction itself using an x86/amd64 memory
 * addressing mode, returning the *_MEMBASE form of the opcode.
 * LOAD_OPCODE identifies the load feeding sreg1; only word-sized loads
 * (and 8-byte loads on amd64) are eligible, except for the special-cased
 * byte compare-with-immediate.
 */
9663 op_to_op_src1_membase (int load_opcode, int opcode)
9666 /* FIXME: This has sign extension issues */
9668 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9669 return OP_X86_COMPARE_MEMBASE8_IMM;
9672 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9677 return OP_X86_PUSH_MEMBASE;
9678 case OP_COMPARE_IMM:
9679 case OP_ICOMPARE_IMM:
9680 return OP_X86_COMPARE_MEMBASE_IMM;
9683 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variant of the same folding, distinguishing 4- and 8-byte loads */
9688 /* FIXME: This has sign extension issues */
9690 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9691 return OP_X86_COMPARE_MEMBASE8_IMM;
9696 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9697 return OP_X86_PUSH_MEMBASE;
9699 /* FIXME: This only works for 32 bit immediates
9700 case OP_COMPARE_IMM:
9701 case OP_LCOMPARE_IMM:
9702 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9703 return OP_AMD64_COMPARE_MEMBASE_IMM;
9705 case OP_ICOMPARE_IMM:
9706 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9707 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9711 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9712 return OP_AMD64_COMPARE_MEMBASE_REG;
9715 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9716 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase () for the second source operand
 * (sreg2): when sreg2 is produced by a load, fold the load into the
 * instruction as a reg,[mem] form on x86/amd64. LOAD_OPCODE identifies the
 * load feeding sreg2; each arm checks that the load width matches the
 * operation width (4-byte loads for the 32-bit X86_* forms, 8-byte/word
 * loads for the AMD64_* forms).
 */
9725 op_to_op_src2_membase (int load_opcode, int opcode)
9728 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9734 return OP_X86_COMPARE_REG_MEMBASE;
9736 return OP_X86_ADD_REG_MEMBASE;
9738 return OP_X86_SUB_REG_MEMBASE;
9740 return OP_X86_AND_REG_MEMBASE;
9742 return OP_X86_OR_REG_MEMBASE;
9744 return OP_X86_XOR_REG_MEMBASE;
/* amd64: width of the folded load selects 32-bit X86_* vs 64-bit AMD64_* forms */
9751 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9752 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9756 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9757 return OP_AMD64_COMPARE_REG_MEMBASE;
9760 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9761 return OP_X86_ADD_REG_MEMBASE;
9763 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9764 return OP_X86_SUB_REG_MEMBASE;
9766 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9767 return OP_X86_AND_REG_MEMBASE;
9769 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9770 return OP_X86_OR_REG_MEMBASE;
9772 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9773 return OP_X86_XOR_REG_MEMBASE;
9775 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9776 return OP_AMD64_ADD_REG_MEMBASE;
9778 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9779 return OP_AMD64_SUB_REG_MEMBASE;
9781 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9782 return OP_AMD64_AND_REG_MEMBASE;
9784 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9785 return OP_AMD64_OR_REG_MEMBASE;
9787 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9788 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes that
 * are emulated in software on the current target — the #if guards below
 * cover long shifts on 32-bit platforms and mul/div when the
 * MONO_ARCH_EMULATE_* macros are set. Everything else falls through to
 * mono_op_to_op_imm ().
 * NOTE(review): the case labels under the two #if guards are elided in
 * this excerpt.
 */
9796 mono_op_to_op_imm_noemul (int opcode)
9799 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9804 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9812 return mono_op_to_op_imm (opcode);
9817 * mono_handle_global_vregs:
9819 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9823 mono_handle_global_vregs (MonoCompile *cfg)
9829 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9831 /* Find local vregs used in more than one bb */
9832 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9833 MonoInst *ins = bb->code;
9834 int block_num = bb->block_num;
9836 if (cfg->verbose_level > 1)
9837 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9840 for (; ins; ins = ins->next) {
9841 const char *spec = INS_INFO (ins->opcode);
9842 int regtype, regindex;
9845 if (G_UNLIKELY (cfg->verbose_level > 1))
9846 mono_print_ins (ins);
/* By this point the code must be fully lowered: no CIL-level opcodes left */
9848 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit the instruction's dreg (regindex 0), sreg1 (1) and sreg2 (2) in turn */
9850 for (regindex = 0; regindex < 3; regindex ++) {
9853 if (regindex == 0) {
9854 regtype = spec [MONO_INST_DEST];
9858 } else if (regindex == 1) {
9859 regtype = spec [MONO_INST_SRC1];
9864 regtype = spec [MONO_INST_SRC2];
9870 #if SIZEOF_VOID_P == 4
9871 if (regtype == 'l') {
9873 * Since some instructions reference the original long vreg,
9874 * and some reference the two component vregs, it is quite hard
9875 * to determine when it needs to be global. So be conservative.
9877 if (!get_vreg_to_inst (cfg, vreg)) {
9878 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9880 if (cfg->verbose_level > 1)
9881 printf ("LONG VREG R%d made global.\n", vreg);
9885 * Make the component vregs volatile since the optimizations can
9886 * get confused otherwise.
9888 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9889 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9893 g_assert (vreg != -1);
9895 prev_bb = vreg_to_bb [vreg];
9897 /* 0 is a valid block num */
9898 vreg_to_bb [vreg] = block_num + 1;
9899 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are handled by the regallocator, skip them */
9900 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9903 if (!get_vreg_to_inst (cfg, vreg)) {
9904 if (G_UNLIKELY (cfg->verbose_level > 1))
9905 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9909 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9912 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9915 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9918 g_assert_not_reached ();
9922 /* Flag as having been used in more than one bb */
9923 vreg_to_bb [vreg] = -1;
9929 /* If a variable is used in only one bblock, convert it into a local vreg */
9930 for (i = 0; i < cfg->num_varinfo; i++) {
9931 MonoInst *var = cfg->varinfo [i];
9932 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9934 switch (var->type) {
9940 #if SIZEOF_VOID_P == 8
9943 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9944 /* Enabling this screws up the fp stack on x86 */
9947 /* Arguments are implicitly global */
9948 /* Putting R4 vars into registers doesn't work currently */
9949 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9951 * Make sure the variable's liveness interval doesn't contain a call, since
9952 * that would cause the lvreg to be spilled, making the whole optimization
9955 /* This is too slow for JIT compilation */
9957 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9959 int def_index, call_index, ins_index;
9960 gboolean spilled = FALSE;
9965 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9966 const char *spec = INS_INFO (ins->opcode);
9968 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9969 def_index = ins_index;
9971 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
/* FIX: the second disjunct previously re-tested SRC1/sreg1, duplicating the
 * first one, so uses of the variable through sreg2 were never detected and a
 * variable whose live range crossed a call via an sreg2 use could be
 * wrongly converted to a local vreg. Test SRC2/sreg2 as clearly intended. */
9972 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
9973 if (call_index > def_index) {
9979 if (MONO_IS_CALL (ins))
9980 call_index = ins_index;
9990 if (G_UNLIKELY (cfg->verbose_level > 2))
9991 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; the compaction pass below removes it */
9992 var->flags |= MONO_INST_IS_DEAD;
9993 cfg->vreg_to_inst [var->dreg] = NULL;
10000 * Compress the varinfo and vars tables so the liveness computation is faster and
10001 * takes up less space.
10004 for (i = 0; i < cfg->num_varinfo; ++i) {
10005 MonoInst *var = cfg->varinfo [i];
10006 if (pos < i && cfg->locals_start == i)
10007 cfg->locals_start = pos;
10008 if (!(var->flags & MONO_INST_IS_DEAD)) {
10010 cfg->varinfo [pos] = cfg->varinfo [i];
10011 cfg->varinfo [pos]->inst_c0 = pos;
10012 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10013 cfg->vars [pos].idx = pos;
10014 #if SIZEOF_VOID_P == 4
10015 if (cfg->varinfo [pos]->type == STACK_I8) {
10016 /* Modify the two component vars too */
10019 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10020 var1->inst_c0 = pos;
10021 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10022 var1->inst_c0 = pos;
10029 cfg->num_varinfo = pos;
10030 if (cfg->locals_start > cfg->num_varinfo)
10031 cfg->locals_start = cfg->num_varinfo;
10035 * mono_spill_global_vars:
10037 * Generate spill code for variables which are not allocated to registers,
10038 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10039 * code is generated which could be optimized by the local optimization passes.
10042 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10044 MonoBasicBlock *bb;
10046 int orig_next_vreg;
10047 guint32 *vreg_to_lvreg;
10049 guint32 i, lvregs_len;
10050 gboolean dest_has_lvreg = FALSE;
/* Maps a spec-string register type character ('i'/'l'/'f') to a stack type */
10051 guint32 stacktypes [128];
10053 *need_local_opts = FALSE;
10055 memset (spec2, 0, sizeof (spec2));
10057 /* FIXME: Move this function to mini.c */
10058 stacktypes ['i'] = STACK_PTR;
10059 stacktypes ['l'] = STACK_I8;
10060 stacktypes ['f'] = STACK_R8;
10062 #if SIZEOF_VOID_P == 4
10063 /* Create MonoInsts for longs */
/* On 32-bit targets a stack-allocated long has two component int vregs
 * (dreg + 1 and dreg + 2); give each its own OP_REGOFFSET MonoInst pointing
 * into the parent's stack slot at the LS/MS word offsets. */
10064 for (i = 0; i < cfg->num_varinfo; i++) {
10065 MonoInst *ins = cfg->varinfo [i];
10067 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10068 switch (ins->type) {
10069 #ifdef MONO_ARCH_SOFT_FLOAT
10075 g_assert (ins->opcode == OP_REGOFFSET);
10077 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10079 tree->opcode = OP_REGOFFSET;
10080 tree->inst_basereg = ins->inst_basereg;
10081 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10083 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10085 tree->opcode = OP_REGOFFSET;
10086 tree->inst_basereg = ins->inst_basereg;
10087 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10097 /* FIXME: widening and truncation */
10100 * As an optimization, when a variable allocated to the stack is first loaded into
10101 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10102 * the variable again.
10104 orig_next_vreg = cfg->next_vreg;
/* vreg -> currently cached lvreg (0 = none); lvregs[0..lvregs_len) lists the
 * vregs with a live cache entry so the map can be reset cheaply. */
10105 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10106 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10109 /* Add spill loads/stores */
10110 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10113 if (cfg->verbose_level > 1)
10114 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10116 /* Clear vreg_to_lvreg array */
/* The cache is per-bblock: entries never survive into the next block */
10117 for (i = 0; i < lvregs_len; i++)
10118 vreg_to_lvreg [lvregs [i]] = 0;
/* Main pass: rewrite the dreg and sregs of every instruction */
10122 MONO_BB_FOR_EACH_INS (bb, ins) {
10123 const char *spec = INS_INFO (ins->opcode);
10124 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10125 gboolean store, no_lvreg;
10127 if (G_UNLIKELY (cfg->verbose_level > 1))
10128 mono_print_ins (ins);
10130 if (ins->opcode == OP_NOP)
10134 * We handle LDADDR here as well, since it can only be decomposed
10135 * when variable addresses are known.
10137 if (ins->opcode == OP_LDADDR) {
10138 MonoInst *var = ins->inst_p0;
10140 if (var->opcode == OP_VTARG_ADDR) {
10141 /* Happens on SPARC/S390 where vtypes are passed by reference */
10142 MonoInst *vtaddr = var->inst_left;
10143 if (vtaddr->opcode == OP_REGVAR) {
10144 ins->opcode = OP_MOVE;
10145 ins->sreg1 = vtaddr->dreg;
/* var->inst_left is the same node as vtaddr, so this tests vtaddr->opcode */
10147 else if (var->inst_left->opcode == OP_REGOFFSET) {
10148 ins->opcode = OP_LOAD_MEMBASE;
10149 ins->inst_basereg = vtaddr->inst_basereg;
10150 ins->inst_offset = vtaddr->inst_offset;
10154 g_assert (var->opcode == OP_REGOFFSET);
/* Variable lives on the stack: its address is basereg + offset */
10156 ins->opcode = OP_ADD_IMM;
10157 ins->sreg1 = var->inst_basereg;
10158 ins->inst_imm = var->inst_offset;
10161 *need_local_opts = TRUE;
10162 spec = INS_INFO (ins->opcode);
/* Sanity check: no CIL-level opcodes should remain at this point */
10165 if (ins->opcode < MONO_CEE_LAST) {
10166 mono_print_ins (ins);
10167 g_assert_not_reached ();
10171 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10175 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg and sreg2 so the base register is treated as a source below;
 * spec2 describes this swapped view of the instruction. */
10176 tmp_reg = ins->dreg;
10177 ins->dreg = ins->sreg2;
10178 ins->sreg2 = tmp_reg;
10181 spec2 [MONO_INST_DEST] = ' ';
10182 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10183 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10185 } else if (MONO_IS_STORE_MEMINDEX (ins))
10186 g_assert_not_reached ();
10191 if (G_UNLIKELY (cfg->verbose_level > 1))
10192 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* FIRST PASS: the destination register */
10197 regtype = spec [MONO_INST_DEST];
10198 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10201 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10202 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10203 MonoInst *store_ins;
10206 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10208 if (var->opcode == OP_REGVAR) {
/* Variable allocated to a hard register: just rename the dreg */
10209 ins->dreg = var->dreg;
10210 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10212 * Instead of emitting a load+store, use a _membase opcode.
10214 g_assert (var->opcode == OP_REGOFFSET);
10215 if (ins->opcode == OP_MOVE) {
10218 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10219 ins->inst_basereg = var->inst_basereg;
10220 ins->inst_offset = var->inst_offset;
10223 spec = INS_INFO (ins->opcode);
10227 g_assert (var->opcode == OP_REGOFFSET);
10229 prev_dreg = ins->dreg;
10231 /* Invalidate any previous lvreg for this vreg */
10232 vreg_to_lvreg [ins->dreg] = 0;
10236 #ifdef MONO_ARCH_SOFT_FLOAT
10237 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* With soft float, an R8 value is really stored as an I8 */
10239 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Give the instruction a fresh lvreg as dest and spill it to the stack slot */
10243 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10245 if (regtype == 'l') {
/* Longs are spilled as two I4 stores of the component vregs */
10246 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10247 mono_bblock_insert_after_ins (bb, ins, store_ins);
10248 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10249 mono_bblock_insert_after_ins (bb, ins, store_ins);
10252 g_assert (store_opcode != OP_STOREV_MEMBASE);
10254 /* Try to fuse the store into the instruction itself */
10255 /* FIXME: Add more instructions */
10256 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10257 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10258 ins->inst_imm = ins->inst_c0;
10259 ins->inst_destbasereg = var->inst_basereg;
10260 ins->inst_offset = var->inst_offset;
10261 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled variable becomes a direct store of the source */
10262 ins->opcode = store_opcode;
10263 ins->inst_destbasereg = var->inst_basereg;
10264 ins->inst_offset = var->inst_offset;
/* The instruction is now a store: swap regs and switch to the spec2 view,
 * matching the MONO_IS_STORE_MEMBASE handling above */
10268 tmp_reg = ins->dreg;
10269 ins->dreg = ins->sreg2;
10270 ins->sreg2 = tmp_reg;
10273 spec2 [MONO_INST_DEST] = ' ';
10274 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10275 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10277 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10278 // FIXME: The backends expect the base reg to be in inst_basereg
10279 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10281 ins->inst_basereg = var->inst_basereg;
10282 ins->inst_offset = var->inst_offset;
10283 spec = INS_INFO (ins->opcode);
10285 /* printf ("INS: "); mono_print_ins (ins); */
10286 /* Create a store instruction */
10287 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10289 /* Insert it after the instruction */
10290 mono_bblock_insert_after_ins (bb, ins, store_ins);
10293 * We can't assign ins->dreg to var->dreg here, since the
10294 * sregs could use it. So set a flag, and do it after
10297 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10298 dest_has_lvreg = TRUE;
/* SECOND PASS: the two source registers */
10307 for (srcindex = 0; srcindex < 2; ++srcindex) {
10308 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10309 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10311 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10312 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10313 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10314 MonoInst *load_ins;
10315 guint32 load_opcode;
10317 if (var->opcode == OP_REGVAR) {
/* Variable allocated to a hard register: rename the sreg in place */
10319 ins->sreg1 = var->dreg;
10321 ins->sreg2 = var->dreg;
10325 g_assert (var->opcode == OP_REGOFFSET);
10327 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10329 g_assert (load_opcode != OP_LOADV_MEMBASE);
10331 if (vreg_to_lvreg [sreg]) {
10332 /* The variable is already loaded to an lvreg */
10333 if (G_UNLIKELY (cfg->verbose_level > 1))
10334 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
/* Reuse the cached lvreg instead of emitting another load */
10336 ins->sreg1 = vreg_to_lvreg [sreg];
10338 ins->sreg2 = vreg_to_lvreg [sreg];
10342 /* Try to fuse the load into the instruction */
10343 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10344 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10345 ins->inst_basereg = var->inst_basereg;
10346 ins->inst_offset = var->inst_offset;
10347 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10348 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10349 ins->sreg2 = var->inst_basereg;
10350 ins->inst_offset = var->inst_offset;
10352 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
/* A move of a spilled variable into itself-via-load is redundant */
10353 ins->opcode = OP_NOP;
10356 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and remember it in the cache */
10358 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10360 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10361 if (var->dreg == prev_dreg) {
10363 * sreg refers to the value loaded by the load
10364 * emitted below, but we need to use ins->dreg
10365 * since it refers to the store emitted earlier.
10369 vreg_to_lvreg [var->dreg] = sreg;
10370 g_assert (lvregs_len < 1024);
10371 lvregs [lvregs_len ++] = var->dreg;
10380 if (regtype == 'l') {
/* Longs are loaded as two I4 loads of the component vregs */
10381 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10382 mono_bblock_insert_before_ins (bb, ins, load_ins);
10383 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10384 mono_bblock_insert_before_ins (bb, ins, load_ins);
10387 #if SIZEOF_VOID_P == 4
10388 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10390 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10391 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg pass above: now that the sregs are processed it is
 * safe to record the dest lvreg in the cache */
10397 if (dest_has_lvreg) {
10398 vreg_to_lvreg [prev_dreg] = ins->dreg;
10399 g_assert (lvregs_len < 1024);
10400 lvregs [lvregs_len ++] = prev_dreg;
10401 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes */
10405 tmp_reg = ins->dreg;
10406 ins->dreg = ins->sreg2;
10407 ins->sreg2 = tmp_reg;
10410 if (MONO_IS_CALL (ins)) {
10411 /* Clear vreg_to_lvreg array */
/* NOTE(review): presumably because a call can clobber the registers holding
 * the cached lvregs — confirm against the register allocator's assumptions */
10412 for (i = 0; i < lvregs_len; i++)
10413 vreg_to_lvreg [lvregs [i]] = 0;
10417 if (cfg->verbose_level > 1)
10418 mono_print_ins_index (1, ins);
10425 * - use 'iadd' instead of 'int_add'
10426 * - handling ovf opcodes: decompose in method_to_ir.
10427 * - unify iregs/fregs
10428 * -> partly done, the missing parts are:
10429 * - a more complete unification would involve unifying the hregs as well, so
10430 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10431 * would no longer map to the machine hregs, so the code generators would need to
10432 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10433 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10434 * fp/non-fp branches speeds it up by about 15%.
10435 * - use sext/zext opcodes instead of shifts
10437 * - get rid of TEMPLOADs if possible and use vregs instead
10438 * - clean up usage of OP_P/OP_ opcodes
10439 * - cleanup usage of DUMMY_USE
10440 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10442 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10443 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10444 * - make sure handle_stack_args () is called before the branch is emitted
10445 * - when the new IR is done, get rid of all unused stuff
10446 * - COMPARE/BEQ as separate instructions or unify them ?
10447 * - keeping them separate allows specialized compare instructions like
10448 * compare_imm, compare_membase
10449 * - most back ends unify fp compare+branch, fp compare+ceq
10450 * - integrate handle_stack_args into inline_method
10451 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10452 * - Things to backport to the old JIT:
10453 * - op_atomic_exchange fix for amd64
10454 * - localloc fix for amd64
10455 * - x86 type_token change
10457 * - long eq/ne optimizations
10458 * - handle long shift opts on 32 bit platforms somehow: they require
10459 * 3 sregs (2 for arg1 and 1 for arg2)
10460 * - make byref a 'normal' type.
10461 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10462 * variable if needed.
10463 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10464 * like inline_method.
10465 * - remove inlining restrictions
10466 * - remove mono_save_args.
10467 * - add 'introduce a new optimization to simplify some range checks'
10468 * - fix LNEG and enable cfold of INEG
10469 * - generalize x86 optimizations like ldelema as a peephole optimization
10470 * - add store_mem_imm for amd64
10471 * - optimize the loading of the interruption flag in the managed->native wrappers
10472 * - avoid special handling of OP_NOP in passes
10473 * - move code inserting instructions into one function/macro.
10474 * - cleanup the code replacement in decompose_long_opts ()
10475 * - try a coalescing phase after liveness analysis
10476 * - add float -> vreg conversion + local optimizations on !x86
10477 * - figure out how to handle decomposed branches during optimizations, ie.
10478 * compare+branch, op_jump_table+op_br etc.
10479 * - promote RuntimeXHandles to vregs
10480 * - vtype cleanups:
10481 * - add a NEW_VARLOADA_VREG macro
10482 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10483 * accessing vtype fields.
10484 * - get rid of I8CONST on 64 bit platforms
10485 * - dealing with the increase in code size due to branches created during opcode
10487 * - use extended basic blocks
10488 * - all parts of the JIT
10489 * - handle_global_vregs () && local regalloc
10490 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10491 * - sources of increase in code size:
10494 * - isinst and castclass
10495 * - lvregs not allocated to global registers even if used multiple times
10496 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10498 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10499 * - add all micro optimizations from the old JIT
10500 * - put tree optimizations into the deadce pass
10501 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10502 * specific function.
10503 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10504 * fcompare + branchCC.
10505 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10506 * running generics.exe.
10507 * - create a helper function for allocating a stack slot, taking into account
10508 * MONO_CFG_HAS_SPILLUP.
10509 * - merge new GC changes in mini.c.
10511 * - merge the ia64 switch changes.
10512 * - merge the mips conditional changes.
10513 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10514 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10515 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10516 * - optimize mono_regstate2_alloc_int/float.
10517 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10518 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10519 * parts of the tree could be separated by other instructions, killing the tree
10520 * arguments, or stores killing loads etc. Also, should we fold loads into other
10521 * instructions if the result of the load is used multiple times ?
10522 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10523 * - LAST MERGE: 108395.
10524 * - when returning vtypes in registers, generate IR and append it to the end of the
10525 * last bb instead of doing it in the epilog.
10526 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10527 * ones in inssel.h.
10528 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10536 - When to decompose opcodes:
10537 - earlier: this makes some optimizations hard to implement, since the low level IR
10538 no longer contains the necessary information. But it is easier to do.
10539 - later: harder to implement, enables more optimizations.
10540 - Branches inside bblocks:
10541 - created when decomposing complex opcodes.
10542 - branches to another bblock: harmless, but not tracked by the branch
10543 optimizations, so need to branch to a label at the start of the bblock.
10544 - branches to inside the same bblock: very problematic, trips up the local
10545 reg allocator. Can be fixed by splitting the current bblock, but that is a
10546 complex operation, since some local vregs can become global vregs etc.
10547 - Local/global vregs:
10548 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10549 local register allocator.
10550 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10551 structure, created by mono_create_var (). Assigned to hregs or the stack by
10552 the global register allocator.
10553 - When to do optimizations like alu->alu_imm:
10554 - earlier -> saves work later on since the IR will be smaller/simpler
10555 - later -> can work on more instructions
10556 - Handling of valuetypes:
10557 - When a vtype is pushed on the stack, a new temporary is created, an
10558 instruction computing its address (LDADDR) is emitted and pushed on
10559 the stack. Need to optimize cases when the vtype is used immediately as in
10560 argument passing, stloc etc.
10561 - Instead of the to_end stuff in the old JIT, simply call the function handling
10562 the values on the stack before emitting the last instruction of the bb.