2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whether 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 extern MonoMethodSignature *helper_sig_class_init_trampoline;
132 extern MonoMethodSignature *helper_sig_domain_get;
133 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
241 type = &type->data.generic_class->container_class->byval_arg;
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
282 #define CHECK_BBLOCK(target,ip,tblock) do { \
283 if ((target) < (ip) && !(tblock)->code) { \
284 bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
285 if (cfg->verbose_level > 2) printf ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
289 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
290 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
291 int _length_reg = alloc_ireg (cfg); \
292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
293 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
294 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
298 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
299 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
300 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
303 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
304 ins->sreg1 = array_reg; \
305 ins->sreg2 = index_reg; \
306 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
307 MONO_ADD_INS ((cfg)->cbb, ins); \
308 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
309 (cfg)->cbb->has_array_access = TRUE; \
313 #if defined(__i386__) || defined(__x86_64__)
314 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
315 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
316 (dest)->dreg = alloc_preg ((cfg)); \
317 (dest)->sreg1 = (sr1); \
318 (dest)->sreg2 = (sr2); \
319 (dest)->inst_imm = (imm); \
320 (dest)->backend.shift_amount = (shift); \
321 MONO_ADD_INS ((cfg)->cbb, (dest)); \
325 #if SIZEOF_VOID_P == 8
326 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
327 /* FIXME: Need to add many more cases */ \
328 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
330 int dr = alloc_preg (cfg); \
331 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
332 (ins)->sreg2 = widen->dreg; \
336 #define ADD_WIDEN_OP(ins, arg1, arg2)
339 #define ADD_BINOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 ins->sreg2 = sp [1]->dreg; \
344 type_from_op (ins, sp [0], sp [1]); \
346 /* Have to insert a widening op */ \
347 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
348 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 mono_decompose_opcode ((cfg), (ins)); \
354 #define ADD_UNOP(op) do { \
355 MONO_INST_NEW (cfg, ins, (op)); \
357 ins->sreg1 = sp [0]->dreg; \
358 type_from_op (ins, sp [0], NULL); \
360 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
361 MONO_ADD_INS ((cfg)->cbb, (ins)); \
363 mono_decompose_opcode (cfg, ins); \
366 #define ADD_BINCOND(next_block) do { \
369 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
370 cmp->sreg1 = sp [0]->dreg; \
371 cmp->sreg2 = sp [1]->dreg; \
372 type_from_op (cmp, sp [0], sp [1]); \
374 type_from_op (ins, sp [0], sp [1]); \
375 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
376 GET_BBLOCK (cfg, tblock, target); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_true_bb = tblock; \
379 CHECK_BBLOCK (target, ip, tblock); \
380 if ((next_block)) { \
381 link_bblock (cfg, bblock, (next_block)); \
382 ins->inst_false_bb = (next_block); \
383 start_new_bblock = 1; \
385 GET_BBLOCK (cfg, tblock, ip); \
386 link_bblock (cfg, bblock, tblock); \
387 ins->inst_false_bb = tblock; \
388 start_new_bblock = 2; \
390 if (sp != stack_start) { \
391 handle_stack_args (cfg, stack_start, sp - stack_start); \
392 CHECK_UNVERIFIABLE (cfg); \
394 MONO_ADD_INS (bblock, cmp); \
395 MONO_ADD_INS (bblock, ins); \
399 * link_bblock: Links two basic blocks
401 * Links two basic blocks in the control flow graph: the 'from'
402 * argument is the starting block and the 'to' argument is the block
403 * that control flow reaches after 'from'.
406 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
408 MonoBasicBlock **newa;
412 if (from->cil_code) {
414 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
416 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
419 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
421 printf ("edge from entry to exit\n");
426 for (i = 0; i < from->out_count; ++i) {
427 if (to == from->out_bb [i]) {
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
434 for (i = 0; i < from->out_count; ++i) {
435 newa [i] = from->out_bb [i];
443 for (i = 0; i < to->in_count; ++i) {
444 if (from == to->in_bb [i]) {
450 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
451 for (i = 0; i < to->in_count; ++i) {
452 newa [i] = to->in_bb [i];
461 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
463 link_bblock (cfg, from, to);
467 * mono_find_block_region:
469 * We mark each basic block with a region ID. We use that to avoid BB
470 * optimizations when blocks are in different regions.
473 * A region token that encodes where this region is, and information
474 * about the clause owner for this block.
476 * The region encodes the try/catch/filter clause that owns this block
477 * as well as the type. -1 is a special value that represents a block
478 * that is in none of try/catch/filter.
481 mono_find_block_region (MonoCompile *cfg, int offset)
483 MonoMethod *method = cfg->method;
484 MonoMethodHeader *header = mono_method_get_header (method);
485 MonoExceptionClause *clause;
488 /* first search for handlers and filters */
489 for (i = 0; i < header->num_clauses; ++i) {
490 clause = &header->clauses [i];
491 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
492 (offset < (clause->handler_offset)))
493 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
495 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
496 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
497 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
498 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
499 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
501 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
505 /* search the try blocks */
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
508 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
509 return ((i + 1) << 8) | clause->flags;
516 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
518 MonoMethod *method = cfg->method;
519 MonoMethodHeader *header = mono_method_get_header (method);
520 MonoExceptionClause *clause;
521 MonoBasicBlock *handler;
525 for (i = 0; i < header->num_clauses; ++i) {
526 clause = &header->clauses [i];
527 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
528 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
529 if (clause->flags == type) {
530 handler = cfg->cil_offset_to_bb [clause->handler_offset];
532 res = g_list_append (res, handler);
540 mono_create_spvar_for_region (MonoCompile *cfg, int region)
544 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
548 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
556 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
558 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
566 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
570 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
571 /* prevent it from being register allocated */
572 var->flags |= MONO_INST_INDIRECT;
574 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
579 static MonoBasicBlock*
580 find_previous (MonoBasicBlock **bblocks, guint32 n_bblocks, MonoBasicBlock *start, const guchar *code)
582 MonoBasicBlock *best = start;
585 for (i = 0; i < n_bblocks; ++i) {
587 MonoBasicBlock *bb = bblocks [i];
589 if (bb->cil_code && bb->cil_code < code && bb->cil_code > best->cil_code)
598 split_bblock (MonoCompile *cfg, MonoBasicBlock *first, MonoBasicBlock *second) {
607 * FIXME: take into account all the details:
608 * second may have been the target of more than one bblock
610 second->out_count = first->out_count;
611 second->out_bb = first->out_bb;
613 for (i = 0; i < first->out_count; ++i) {
614 bb = first->out_bb [i];
615 for (j = 0; j < bb->in_count; ++j) {
616 if (bb->in_bb [j] == first)
617 bb->in_bb [j] = second;
621 first->out_count = 0;
622 first->out_bb = NULL;
623 link_bblock (cfg, first, second);
625 second->last_ins = first->last_ins;
627 /*printf ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
628 for (inst = first->code; inst && inst->next; inst = inst->next) {
629 /*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
630 printf ("found %p: %s", inst->next->cil_code, code);
632 if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
633 second->code = inst->next;
635 first->last_ins = inst;
636 second->next_bb = first->next_bb;
637 first->next_bb = second;
642 g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
657 inst->type = STACK_MP;
658 inst->klass = mono_defaults.object_class;
662 inst->klass = klass = mono_class_from_mono_type (type);
665 switch (type->type) {
667 inst->type = STACK_INV;
671 case MONO_TYPE_BOOLEAN:
677 inst->type = STACK_I4;
682 case MONO_TYPE_FNPTR:
683 inst->type = STACK_PTR;
685 case MONO_TYPE_CLASS:
686 case MONO_TYPE_STRING:
687 case MONO_TYPE_OBJECT:
688 case MONO_TYPE_SZARRAY:
689 case MONO_TYPE_ARRAY:
690 inst->type = STACK_OBJ;
694 inst->type = STACK_I8;
698 inst->type = STACK_R8;
700 case MONO_TYPE_VALUETYPE:
701 if (type->data.klass->enumtype) {
702 type = type->data.klass->enum_basetype;
706 inst->type = STACK_VTYPE;
709 case MONO_TYPE_TYPEDBYREF:
710 inst->klass = mono_defaults.typed_reference_class;
711 inst->type = STACK_VTYPE;
713 case MONO_TYPE_GENERICINST:
714 type = &type->data.generic_class->container_class->byval_arg;
717 case MONO_TYPE_MVAR :
718 /* FIXME: all the arguments must be references for now,
719 * later look inside cfg and see if the arg num is
722 g_assert (cfg->generic_sharing_context);
723 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_MOVE;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1143 inline static MonoInst *
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1169 return cfg->rgctx_var;
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to montype not handled\n", ins->type);
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 switch (mono_type_get_underlying_type (t)->type) {
1195 case MONO_TYPE_BOOLEAN:
1198 case MONO_TYPE_CHAR:
1205 case MONO_TYPE_FNPTR:
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_VALUETYPE:
1220 case MONO_TYPE_TYPEDBYREF:
1222 case MONO_TYPE_GENERICINST:
1223 if (mono_type_generic_inst_is_valuetype (t))
1229 g_assert_not_reached ();
1236 array_access_to_klass (int opcode)
1240 return mono_defaults.byte_class;
1242 return mono_defaults.uint16_class;
1245 return mono_defaults.int_class;
1248 return mono_defaults.sbyte_class;
1251 return mono_defaults.int16_class;
1254 return mono_defaults.int32_class;
1256 return mono_defaults.uint32_class;
1259 return mono_defaults.int64_class;
1262 return mono_defaults.single_class;
1265 return mono_defaults.double_class;
1266 case CEE_LDELEM_REF:
1267 case CEE_STELEM_REF:
1268 return mono_defaults.object_class;
1270 g_assert_not_reached ();
1276 * We try to share variables when possible
1279 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1284 /* inlining can result in deeper stacks */
1285 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1286 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1288 pos = ins->type - 1 + slot * STACK_MAX;
1290 switch (ins->type) {
1297 if ((vnum = cfg->intvars [pos]))
1298 return cfg->varinfo [vnum];
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1300 cfg->intvars [pos] = res->inst_c0;
1303 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1309 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1311 if (cfg->compile_aot) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single join point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the join point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpouse, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
1400 locals = bb->out_stack;
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
1428 if (outb->in_stack != locals) {
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Load into 'intf_reg' the interface-offset entry for 'klass', read from
 * the interface_offsets array laid out at negative offsets before the
 * vtable pointed to by 'vtable_reg'.
 */
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
/* AOT: the interface id is not known at compile time; load the
 * already-adjusted (negative, scaled) offset via a patch. */
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a constant, index directly before the vtable */
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1465 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1466 * stored in "klass_reg" implements the interface "klass".
/* Tests bit (interface_id & 7) of byte (interface_id >> 3) in the class's
 * interface_bitmap.  The AOT path computes the bit mask at run time since
 * the interface id is only known via a patch. */
1469 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1471 int ibitmap_reg = alloc_preg (cfg);
1472 int ibitmap_byte_reg = alloc_preg (cfg);
1474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1476 if (cfg->compile_aot) {
1477 int iid_reg = alloc_preg (cfg);
1478 int shifted_iid_reg = alloc_preg (cfg);
1479 int ibitmap_byte_address_reg = alloc_preg (cfg);
1480 int masked_iid_reg = alloc_preg (cfg);
1481 int iid_one_bit_reg = alloc_preg (cfg);
1482 int iid_bit_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1488 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1489 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1490 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: both byte index and bit mask are compile-time constants */
1492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1498 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1499 * stored in "vtable_reg" implements the interface "klass".
/* Same bitmap test as mini_emit_load_intf_bit_reg_class, but reads the
 * interface_bitmap cached on the vtable instead of on the class. */
1502 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1504 int ibitmap_reg = alloc_preg (cfg);
1505 int ibitmap_byte_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1509 if (cfg->compile_aot) {
1510 int iid_reg = alloc_preg (cfg);
1511 int shifted_iid_reg = alloc_preg (cfg);
1512 int ibitmap_byte_address_reg = alloc_preg (cfg);
1513 int masked_iid_reg = alloc_preg (cfg);
1514 int iid_one_bit_reg = alloc_preg (cfg);
1515 int iid_bit_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1521 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1522 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1523 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1531 * Emit code which checks whenever the interface id of @klass is smaller than
1532 * than the value given by max_iid_reg.
/* On failure: branch to 'false_target' if given, otherwise throw
 * InvalidCastException (conditional exception). */
1535 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1536 MonoBasicBlock *false_target)
1538 if (cfg->compile_aot) {
/* interface id only known at load time: compare against a patched constant */
1539 int iid_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1551 /* Same as above, but obtains max_iid from a vtable */
1553 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1554 MonoBasicBlock *false_target)
1556 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1559 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1562 /* Same as above, but obtains max_iid from a klass */
1564 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1565 MonoBasicBlock *false_target)
1567 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1570 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 * Emit an 'is-instance' test of the class in 'klass_reg' against the
 * compile-time class 'klass' using the supertypes table: branch to
 * 'true_target' when supertypes[klass->idepth - 1] == klass, to
 * 'false_target' when the runtime idepth is too small.
 */
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int idepth_reg = alloc_preg (cfg);
1577 int stypes_reg = alloc_preg (cfg);
1578 int stype = alloc_preg (cfg);
/* Only need the explicit depth check when the supertypes table may be
 * shorter than klass->idepth (deep hierarchies). */
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1587 if (cfg->compile_aot) {
1588 int const_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1590 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast test against the vtable in 'vtable_reg':
 * range-check the interface id, then test the interface bitmap bit.
 * Branches to 'true_target' when it implements 'klass'; with no
 * true_target it instead throws InvalidCastException on failure.
 */
1598 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1600 int intf_reg = alloc_preg (cfg);
1602 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1603 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1608 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1612 * Variant of the above that takes a register to the class, not the vtable.
1615 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1617 int intf_bit_reg = alloc_preg (cfg);
1619 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1620 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* bit set -> implements the interface; otherwise throw (no true_target case) */
1623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1625 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 * Emit an exact-class equality check: throw InvalidCastException unless
 * the class pointer in 'klass_reg' equals 'klass'.  AOT uses a patched
 * class constant; JIT embeds the pointer as an immediate.
 */
1629 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1631 if (cfg->compile_aot) {
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 * Compare the class pointer in 'klass_reg' with 'klass' and branch to
 * 'target' using the caller-supplied condition 'branch_op'
 * (e.g. OP_PBEQ / OP_PBNE_UN).
 */
1642 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1644 if (cfg->compile_aot) {
1645 int const_reg = alloc_preg (cfg);
1646 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 * Emit a full castclass check of the object in 'obj_reg' (whose class is
 * already in 'klass_reg') against 'klass', throwing InvalidCastException
 * on mismatch.  Arrays are handled by checking rank and then recursing on
 * the element (cast) class, with special cases for enums and for casts to
 * object[] where any reference element type must be accepted.
 */
1655 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1658 int rank_reg = alloc_preg (cfg);
1659 int eclass_reg = alloc_preg (cfg);
/* array path: ranks must match exactly */
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1663 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1664 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1666 if (klass->cast_class == mono_defaults.object_class) {
/* casting to object[]: any reference element is fine; only non-enum
 * valuetype elements must be rejected */
1667 int parent_reg = alloc_preg (cfg);
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1669 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1670 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1671 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1672 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1673 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1674 } else if (klass->cast_class == mono_defaults.enum_class) {
1675 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1676 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1677 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* general case: recursively check the element class */
1679 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1682 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1683 /* Check that the object is a vector too */
1684 int bounds_reg = alloc_preg (cfg);
/* SZARRAYs have bounds == NULL; a non-NULL bounds means a multi-dim array */
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1687 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: walk the supertypes table like mini_emit_isninst_cast,
 * but throw instead of branching on failure */
1690 int idepth_reg = alloc_preg (cfg);
1691 int stypes_reg = alloc_preg (cfg);
1692 int stype = alloc_preg (cfg);
1694 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1697 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1701 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 * Emit inline code to set 'size' bytes at destreg+offset to 'val'
 * (currently asserted to be 0).  Small aligned sizes use a single
 * store-immediate; larger sizes store from a value register in
 * descending power-of-two chunks, using 8-byte stores only on 64-bit
 * targets that allow the access.
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* only zero-fill is supported by this helper */
1710 g_assert (val == 0);
/* small, sufficiently aligned block: one store-immediate of the right width */
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_VOID_P == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize val in a register and store it repeatedly */
1731 val_reg = alloc_preg (cfg);
1733 if (sizeof (gpointer) == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned prefix: byte stores until aligned (elided lines presumably
 * loop here — TODO confirm against full source) */
1739 /* This could be optimized further if neccesary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (sizeof (gpointer) == 8) {
/* align to 8 with a 4-byte store, then use 8-byte stores */
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy2:
 * Emit inline code to copy 'size' bytes from srcreg+soffset to
 * destreg+doffset via load/store pairs, in descending power-of-two
 * chunks (8-byte moves only on 64-bit targets allowing unaligned
 * access), finishing with 4-, 2- and 1-byte tails.
 */
1781 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* unaligned prefix: copy byte-by-byte until aligned (surrounding loop
 * lines elided in this view — TODO confirm against full source) */
1786 /* This could be optimized further if neccesary */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1797 #if !NO_UNALIGNED_ACCESS
1798 if (sizeof (gpointer) == 8) {
/* bulk copy in 8-byte chunks on 64-bit */
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks */
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte tail */
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* 1-byte tail */
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mini_emit_check_array_type:
 * Emit a store-compatibility check that the object 'obj' is exactly an
 * instance of 'array_class', throwing ArrayTypeMismatchException
 * otherwise.  With MONO_OPT_SHARED (domain-shared code) it compares
 * classes; otherwise it compares the domain-specific vtable pointer
 * directly, which is cheaper.
 */
1837 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1839 int vtable_reg = alloc_preg (cfg);
1841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1843 if (cfg->opt & MONO_OPT_SHARED) {
/* shared code cannot embed a vtable (domain-specific): compare classes */
1844 int class_reg = alloc_preg (cfg);
1845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1846 if (cfg->compile_aot) {
1847 int klass_reg = alloc_preg (cfg);
1848 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1849 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1854 if (cfg->compile_aot) {
1855 int vt_reg = alloc_preg (cfg);
1856 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1857 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * ret_type_to_call_opcode:
 * Map a method's return type to the proper call opcode, selecting among
 * the indirect (calli), virtual and direct variants and among the
 * void/int/long/float/vtype flavours.  Generic sharing resolves type
 * variables to their basic type first; enums and generic instances loop
 * back with their underlying type.
 */
1867 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized -> plain CALL family (presumably the
 * elided guard is 'if (type->byref)' — TODO confirm) */
1870 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 type = mini_get_basic_type_from_generic (gsctx, type);
1874 switch (type->type) {
1875 case MONO_TYPE_VOID:
1876 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1879 case MONO_TYPE_BOOLEAN:
1882 case MONO_TYPE_CHAR:
1885 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1889 case MONO_TYPE_FNPTR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1902 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1903 case MONO_TYPE_VALUETYPE:
/* enums collapse to their underlying integral type */
1904 if (type->data.klass->enumtype) {
1905 type = type->data.klass->enum_basetype;
1908 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1909 case MONO_TYPE_TYPEDBYREF:
1910 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1911 case MONO_TYPE_GENERICINST:
/* re-dispatch on the generic container's open type */
1912 type = &type->data.generic_class->container_class->byval_arg;
1915 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1921 * target_type_is_incompatible:
1922 * @cfg: MonoCompile context
1924 * Check that the item @arg on the evaluation stack can be stored
1925 * in the target type (can be a local, or field, etc).
1926 * The cfg arg can be used to check if we need verification or just
1929 * Returns: non-0 value if arg can't be stored on a target.
1932 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1934 MonoType *simple_type;
1937 if (target->byref) {
1938 /* FIXME: check that the pointed to types match */
1939 if (arg->type == STACK_MP)
1940 return arg->klass != mono_class_from_mono_type (target);
1941 if (arg->type == STACK_PTR)
/* resolve enums/modifiers to the underlying type before dispatching */
1946 simple_type = mono_type_get_underlying_type (target);
1947 switch (simple_type->type) {
1948 case MONO_TYPE_VOID:
1952 case MONO_TYPE_BOOLEAN:
1955 case MONO_TYPE_CHAR:
/* small integral targets accept I4 or native-int stack entries */
1958 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1962 /* STACK_MP is needed when setting pinned locals */
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1968 case MONO_TYPE_FNPTR:
1969 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 if (arg->type != STACK_OBJ)
1979 /* FIXME: check type compatibility */
1983 if (arg->type != STACK_I8)
1988 if (arg->type != STACK_R8)
1991 case MONO_TYPE_VALUETYPE:
/* vtypes must match by exact class */
1992 if (arg->type != STACK_VTYPE)
1994 klass = mono_class_from_mono_type (simple_type);
1995 if (klass != arg->klass)
1998 case MONO_TYPE_TYPEDBYREF:
1999 if (arg->type != STACK_VTYPE)
2001 klass = mono_class_from_mono_type (simple_type);
2002 if (klass != arg->klass)
2005 case MONO_TYPE_GENERICINST:
2006 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2007 if (arg->type != STACK_VTYPE)
2009 klass = mono_class_from_mono_type (simple_type);
2010 if (klass != arg->klass)
2014 if (arg->type != STACK_OBJ)
2016 /* FIXME: check type compatibility */
2020 case MONO_TYPE_MVAR:
2021 /* FIXME: all the arguments must be references for now,
2022 * later look inside cfg and see if the arg num is
2023 * really a reference
2025 g_assert (cfg->generic_sharing_context);
2026 if (arg->type != STACK_OBJ)
2030 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2036 * Prepare arguments for passing to a function call.
2037 * Return a non-zero value if the arguments can't be passed to the given
2039 * The type checks are not yet complete and some conversions may need
2040 * casts on 32 or 64 bit architectures.
2042 * FIXME: implement this using target_type_is_incompatible ()
2045 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2047 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer */
2051 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2055 for (i = 0; i < sig->param_count; ++i) {
2056 if (sig->params [i]->byref) {
2057 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2061 simple_type = sig->params [i];
2062 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* handle: label presumably between the enum/genericinst 'goto handle'
 * re-dispatch — elided lines; TODO confirm */
2064 switch (simple_type->type) {
2065 case MONO_TYPE_VOID:
2070 case MONO_TYPE_BOOLEAN:
2073 case MONO_TYPE_CHAR:
2076 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2082 case MONO_TYPE_FNPTR:
2083 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2086 case MONO_TYPE_CLASS:
2087 case MONO_TYPE_STRING:
2088 case MONO_TYPE_OBJECT:
2089 case MONO_TYPE_SZARRAY:
2090 case MONO_TYPE_ARRAY:
2091 if (args [i]->type != STACK_OBJ)
2096 if (args [i]->type != STACK_I8)
2101 if (args [i]->type != STACK_R8)
2104 case MONO_TYPE_VALUETYPE:
/* enums re-enter the switch with their underlying type */
2105 if (simple_type->data.klass->enumtype) {
2106 simple_type = simple_type->data.klass->enum_basetype;
2109 if (args [i]->type != STACK_VTYPE)
2112 case MONO_TYPE_TYPEDBYREF:
2113 if (args [i]->type != STACK_VTYPE)
2116 case MONO_TYPE_GENERICINST:
2117 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2121 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALLVIRT opcode to its direct-call counterpart
 * (used when a virtual call can be statically devirtualized).
 */
2129 callvirt_to_call (int opcode)
2134 case OP_VOIDCALLVIRT:
/* unknown opcode: programming error */
2143 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map an OP_*CALLVIRT opcode to the matching OP_*CALL_MEMBASE opcode,
 * which calls through a [basereg + offset] slot (vtable/IMT dispatch).
 */
2150 callvirt_to_call_membase (int opcode)
2154 return OP_CALL_MEMBASE;
2155 case OP_VOIDCALLVIRT:
2156 return OP_VOIDCALL_MEMBASE;
2158 return OP_FCALL_MEMBASE;
2160 return OP_LCALL_MEMBASE;
2162 return OP_VCALL_MEMBASE;
/* unknown opcode: programming error */
2164 g_assert_not_reached ();
2170 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Set up the hidden IMT argument (identifying the called interface
 * method) for an interface call.  On architectures with a dedicated IMT
 * register the method pointer (or 'imt_arg', or an AOT constant) is moved
 * into that register; otherwise the back end emits it itself.
 */
2172 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2174 #ifdef MONO_ARCH_IMT_REG
2175 int method_reg = alloc_preg (cfg);
2177 if (cfg->compile_aot) {
2178 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2179 } else if (imt_arg) {
2180 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* JIT, no explicit imt_arg: embed the MonoMethod* as a constant */
2183 MONO_INST_NEW (cfg, ins, OP_PCONST);
2184 ins->inst_p0 = call->method;
2185 ins->dreg = method_reg;
2186 MONO_ADD_INS (cfg->cbb, ins);
2189 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* no IMT register: let the architecture back end handle it */
2191 mono_arch_emit_imt_argument (cfg, call);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from 'mp' describing a patch of kind 'type'
 * at IL offset 'ip' pointing at 'target'.
 */
2196 static MonoJumpInfo *
2197 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2199 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2203 ji->data.target = target;
2208 inline static MonoInst*
2209 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 * Create a MonoCallInst for a call with signature 'sig' and arguments
 * 'args', choosing the opcode from the return type and the calli/virtual
 * flags.  Struct returns get a hidden return-value temporary addressed
 * through OP_OUTARG_VTRETADDR; soft-float targets pre-convert R4
 * arguments via an icall.  The instruction is NOT added to a bblock here.
 */
2211 inline static MonoCallInst *
2212 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2213 MonoInst **args, int calli, int virtual)
2216 #ifdef MONO_ARCH_SOFT_FLOAT
2220 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2223 call->signature = sig;
2225 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2227 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* valuetype return: allocate a temporary to receive the result */
2228 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2231 temp->backend.is_pinvoke = sig->pinvoke;
2234 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2235 * address of return value to increase optimization opportunities.
2236 * Before vtype decomposition, the dreg of the call ins itself represents the
2237 * fact the call modifies the return value. After decomposition, the call will
2238 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2239 * will be transformed into an LDADDR.
2241 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2242 loada->dreg = alloc_preg (cfg);
2243 loada->inst_p0 = temp;
2244 /* We reference the call too since call->dreg could change during optimization */
2245 loada->inst_p1 = call;
2246 MONO_ADD_INS (cfg->cbb, loada);
2248 call->inst.dreg = temp->dreg;
2250 call->vret_var = loada;
2251 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2252 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2254 #ifdef MONO_ARCH_SOFT_FLOAT
2256 * If the call has a float argument, we would need to do an r8->r4 conversion using
2257 * an icall, but that cannot be done during the call sequence since it would clobber
2258 * the call registers + the stack. So we do it before emitting the call.
2260 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2262 MonoInst *in = call->args [i];
2264 if (i >= sig->hasthis)
2265 t = sig->params [i - sig->hasthis];
/* implicit 'this' argument: treat as native int */
2267 t = &mono_defaults.int_class->byval_arg;
2268 t = mono_type_get_underlying_type (t);
2270 if (!t->byref && t->type == MONO_TYPE_R4) {
2271 MonoInst *iargs [1];
2275 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2277 /* The result will be in an int vreg */
2278 call->args [i] = conv;
/* let the back end lower the arguments (registers/stack) */
2283 mono_arch_emit_call (cfg, call);
2285 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2286 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 * Emit an indirect call through the address in 'addr' with signature
 * 'sig' and arguments 'args'; returns the call instruction.
 */
2291 inline static MonoInst*
2292 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2294 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* the target address goes in sreg1 for the *CALL_REG opcodes */
2296 call->inst.sreg1 = addr->dreg;
2298 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2300 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 * Like mono_emit_calli, but additionally passes the runtime generic
 * context 'rgctx_arg' in the architecture's dedicated RGCTX register.
 * Only available on architectures defining MONO_ARCH_RGCTX_REG.
 */
2303 inline static MonoInst*
2304 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2306 #ifdef MONO_ARCH_RGCTX_REG
2308 int rgctx_reg = mono_alloc_preg (cfg);
2310 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2311 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2312 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2313 return (MonoInst*)call;
/* compiled on a target without an RGCTX register: must not be reached */
2315 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) call to 'method'.  Handles: string ctors
 * (rewritten to return string), remoting proxies (redirected to
 * invoke-with-check wrappers), devirtualization of non-virtual/final/
 * sealed targets, delegate Invoke through invoke_impl, and true virtual
 * dispatch through the vtable or IMT slot.  'this' non-NULL selects the
 * virtual path; 'imt_arg' optionally supplies the IMT method argument.
 */
2321 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2322 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2324 gboolean virtual = this != NULL;
2325 gboolean enable_for_aot = TRUE;
2328 if (method->string_ctor) {
2329 /* Create the real signature */
2330 /* FIXME: Cache these */
2331 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2332 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2337 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* possible transparent proxy: route through the remoting check wrapper */
2339 if (this && sig->hasthis &&
2340 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2341 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2342 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2344 call->method = method;
2346 call->inst.flags |= MONO_INST_HAS_METHOD;
2347 call->inst.inst_left = this;
2350 int vtable_reg, slot_reg, this_reg;
2352 this_reg = this->dreg;
/* devirtualize: non-virtual method, or final method (except remoting
 * wrappers), called through callvirt */
2354 if ((!cfg->compile_aot || enable_for_aot) &&
2355 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2356 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2357 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2359 * the method is not virtual, we just need to ensure this is not null
2360 * and then we can call the method directly.
2362 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2363 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2366 if (!method->string_ctor) {
2367 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2368 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2369 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2372 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2374 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2376 return (MonoInst*)call;
2379 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2380 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2381 /* Make a call to delegate->invoke_impl */
2382 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2383 call->inst.inst_basereg = this_reg;
2384 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2385 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2387 return (MonoInst*)call;
2391 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2392 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2393 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2395 * the method is virtual, but we can statically dispatch since either
2396 * it's class or the method itself are sealed.
2397 * But first we need to ensure it's not a null reference.
2399 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2400 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2401 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2403 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2404 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2406 return (MonoInst*)call;
/* true virtual dispatch through [slot_reg + inst_offset] */
2409 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2411 /* Initialize method->slot */
2412 mono_class_setup_vtable (method->klass);
2414 vtable_reg = alloc_preg (cfg);
2415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2416 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2418 #ifdef MONO_ARCH_HAVE_IMT
/* IMT: slots live at negative offsets before the vtable */
2420 guint32 imt_slot = mono_method_get_imt_slot (method);
2421 emit_imt_argument (cfg, call, imt_arg);
2422 slot_reg = vtable_reg;
2423 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2426 if (slot_reg == -1) {
/* non-IMT interface dispatch: go through interface_offsets */
2427 slot_reg = alloc_preg (cfg);
2428 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2429 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
/* class virtual call: index into the vtable's method array */
2432 slot_reg = vtable_reg;
2433 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2436 call->inst.sreg1 = slot_reg;
2437 call->virtual = TRUE;
2440 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2442 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: emits a call to METHOD using the method's own
 * signature and no imt argument (delegates to mono_emit_method_call_full).
 */
2445 static inline MonoInst*
2446 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2448 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to native code FUNC with signature SIG.
 * Builds the call instruction via mono_emit_call_args (non-virtual,
 * non-tail) and appends it to the current basic block.
 */
2452 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2459 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2462 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2464 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for the native address FUNC.
 * Looks up the icall info by address and calls through its wrapper,
 * using the signature recorded in the icall info.
 */
2467 inline static MonoInst*
2468 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2470 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2474 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2478 * mono_emit_abs_call:
2480 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2482 inline static MonoInst*
2483 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2484 MonoMethodSignature *sig, MonoInst **args)
/* Allocate the patch info from the per-compile mempool so its lifetime
 * matches the compilation. */
2486 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2490 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the abs_patches table; it maps the MonoJumpInfo pointer
 * to itself (pointer identity keys, hence NULL hash/equal funcs). */
2493 if (cfg->abs_patches == NULL)
2494 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2495 g_hash_table_insert (cfg->abs_patches, ji, ji);
2496 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know fptr is a patch, not a real address. */
2497 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * get_memcpy_method:
 * Return the corlib-internal String.memcpy(3 args) helper method,
 * caching it in a function-local static on first use.
 * Aborts with g_error if the method is missing (old corlib).
 */
2502 get_memcpy_method (void)
2504 static MonoMethod *memcpy_method = NULL;
2505 if (!memcpy_method) {
2506 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2508 g_error ("Old corlib found. Install a new one");
2510 return memcpy_method;
2514 * Emit code to copy a valuetype of type @klass whose address is stored in
2515 * @src->dreg to memory whose address is stored at @dest->dreg.
2518 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2520 MonoInst *iargs [3];
2523 MonoMethod *memcpy_method;
2527 * This check breaks with spilled vars... need to handle it during verification anyway.
2528 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Use the native (marshalled) size when NATIVE is requested, otherwise
 * the managed value size; both also yield the required alignment. */
2532 n = mono_class_native_size (klass, &align);
2534 n = mono_class_value_size (klass, &align);
/* Small copies (up to 5 pointer-sizes) are inlined as direct moves when
 * intrinsics are enabled, avoiding a call. */
2536 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2537 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2538 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fallback: call the corlib memcpy helper with (dest, src, n). */
2542 EMIT_NEW_ICONST (cfg, iargs [2], n);
2544 memcpy_method = get_memcpy_method ();
2545 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return the corlib-internal String.memset(3 args) helper method,
 * caching it in a function-local static on first use.
 * Aborts with g_error if the method is missing (old corlib).
 */
2550 get_memset_method (void)
2552 static MonoMethod *memset_method = NULL;
2553 if (!memset_method) {
2554 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2556 g_error ("Old corlib found. Install a new one");
2558 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a valuetype of type KLASS at the address
 * held in DEST->dreg. Small types are inlined as stores; larger ones
 * call the corlib memset helper with (dest, 0, n).
 */
2562 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2564 MonoInst *iargs [3];
2567 MonoMethod *memset_method;
2569 /* FIXME: Optimize this for the case when dest is an LDADDR */
2571 mono_class_init (klass);
2572 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sizes: inline the zeroing instead of calling out. */
2574 if (n <= sizeof (gpointer) * 5) {
2575 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2578 memset_method = get_memset_method ();
2580 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2581 EMIT_NEW_ICONST (cfg, iargs [2], n);
2582 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that loads the runtime generic context for METHOD, choosing the
 * source based on CONTEXT_USED and the method's flags:
 *   - method context used: load the mrgctx from the vtable var
 *     (and, per the MONO_GENERIC_CONTEXT_USED_METHOD branch, it may be
 *     reached through the class vtable field of the mrgctx)
 *   - static method: load the vtable from the vtable var
 *   - instance method: load the vtable through `this` (arg 0)
 * NOTE(review): exact return-value semantics depend on elided lines;
 * the visible code establishes the load sources only.
 */
2587 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2589 MonoInst *this = NULL;
/* Only reference types are supported here. */
2591 g_assert (!method->klass->valuetype);
/* Load `this` (arg 0) only for instance methods that don't use the
 * per-method context. */
2593 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
2594 EMIT_NEW_ARGLOAD (cfg, this, 0);
2596 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2597 MonoInst *mrgctx_loc, *mrgctx_var;
/* A method context is only meaningful for inflated generic methods. */
2600 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2602 mrgctx_loc = mono_get_vtable_var (cfg);
2603 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2606 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2607 MonoInst *vtable_loc, *vtable_var;
2611 vtable_loc = mono_get_vtable_var (cfg);
2612 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the vtable var actually holds an mrgctx, dereference its
 * class_vtable field to get the real vtable. */
2614 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2615 MonoInst *mrgctx_var = vtable_var;
2618 vtable_reg = alloc_preg (cfg);
2619 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2620 vtable_var->type = STACK_PTR;
/* Instance method: fetch the vtable through the object header. */
2626 int vtable_reg, res_reg;
2628 vtable_reg = alloc_preg (cfg);
2629 res_reg = alloc_preg (cfg);
2630 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Fetch the rgctx for the current method into (rgctx), first bailing out
 * of generic sharing for valuetype methods at the current opcode *ip. */
2635 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2636 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2637 (rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from mempool MP) and fill an rgctx-entry patch descriptor:
 * the requesting METHOD, whether the lookup goes through an mrgctx,
 * an embedded MonoJumpInfo of PATCH_TYPE/PATCH_DATA, and the
 * INFO_TYPE of the slot to fetch.
 */
2640 static MonoJumpInfoRgctxEntry *
2641 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2643 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2644 res->method = method;
2645 res->in_mrgctx = in_mrgctx;
2646 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2647 res->data->type = patch_type;
2648 res->data->data.target = patch_data;
2649 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy rgctx-fetch trampoline (via an RGCTX_FETCH
 * patch) that resolves ENTRY, passing the rgctx as the single argument.
 */
2654 static inline MonoInst*
2655 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2657 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit a lazy rgctx fetch of the RGCTX_TYPE slot describing KLASS
 * (patch type MONO_PATCH_INFO_CLASS).
 */
2661 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2662 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2664 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2666 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 * Emit a lazy rgctx fetch of the RGCTX_TYPE slot describing CMETHOD
 * (patch type MONO_PATCH_INFO_METHOD).
 */
2670 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2671 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2673 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHOD, cmethod, rgctx_type);
2675 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit a lazy rgctx fetch of the RGCTX_TYPE slot describing FIELD
 * (patch type MONO_PATCH_INFO_FIELD).
 */
2679 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2680 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2682 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2684 return emit_rgctx_fetch (cfg, rgctx, entry);
2688 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
/* Emits a call to the klass's Unbox(1 arg) method on VAL; in shared
 * generic code the callee address comes from an rgctx fetch and the call
 * goes through the rgctx calli path. */
2692 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
2694 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2697 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2698 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2700 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2702 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 * Emit the unbox sequence for the object on top of the stack (sp[0]):
 * verify at runtime that the object's class matches KLASS (throwing
 * InvalidCastException otherwise), then produce the address of the
 * boxed payload (object pointer + sizeof (MonoObject)).
 */
2707 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2711 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2712 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2713 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2714 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2716 obj_reg = sp [0]->dreg;
2717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2720 /* FIXME: generics */
2721 g_assert (klass->rank == 0);
/* Arrays cannot be unboxed: a nonzero rank means invalid cast. */
2724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2725 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class fetched from
 * the rgctx rather than a compile-time constant. */
2731 MonoInst *element_class;
2733 /* This assertion is from the unboxcast insn */
2734 g_assert (klass->rank == 0);
2736 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2737 klass->element_class, MONO_RGCTX_INFO_KLASS);
2739 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2740 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2742 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* Result: address just past the MonoObject header (the value payload). */
2745 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2746 MONO_ADD_INS (cfg->cbb, add);
2747 add->type = STACK_MP;
/*
 * handle_alloc:
 * Emit IR to allocate an object of KLASS, picking the cheapest strategy:
 *   - MONO_OPT_SHARED: generic mono_object_new (domain, klass)
 *   - AOT out-of-line corlib types: token-based mscorlib helper
 *   - a GC managed allocator method, when one is available
 *   - otherwise the allocation function chosen by the runtime, which may
 *     take the instance size in words (pass_lw) plus the vtable.
 * FOR_BOX selects box-specific allocators where applicable.
 */
2754 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2756 MonoInst *iargs [2];
2759 if (cfg->opt & MONO_OPT_SHARED) {
2760 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2761 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2763 alloc_ftn = mono_object_new;
2764 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2765 /* This happens often in argument checking code, eg. throw new FooException... */
2766 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2767 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2768 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2770 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2771 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2774 if (managed_alloc) {
2775 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2776 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2778 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: first argument is the instance size rounded up to
 * whole pointer-words. */
2780 guint32 lw = vtable->klass->instance_size;
2781 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2782 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2783 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2786 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2790 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 * Like handle_alloc, but the vtable/class is supplied at runtime by
 * DATA_INST (shared generic code), so no compile-time vtable is used.
 * Falls back to mono_object_new (shared opt) or
 * mono_object_new_specific (vtable argument).
 */
2794 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2797 MonoInst *iargs [2];
2798 MonoMethod *managed_alloc = NULL;
2802 FIXME: we cannot get managed_alloc here because we can't get
2803 the class's vtable (because it's not a closed class)
2805 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2806 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2809 if (cfg->opt & MONO_OPT_SHARED) {
2810 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2811 iargs [1] = data_inst;
2812 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME above); branch kept for
 * when the limitation is lifted. */
2814 if (managed_alloc) {
2815 iargs [0] = data_inst;
2816 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2819 iargs [0] = data_inst;
2820 alloc_ftn = mono_object_new_specific;
2823 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 * Emit IR to box VAL as an instance of KLASS.
 * Nullable<T> is boxed by calling the klass's Box(1 arg) method;
 * otherwise allocate the object and store the value just past the
 * MonoObject header.
 */
2827 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2829 MonoInst *alloc, *ins;
2831 if (mono_class_is_nullable (klass)) {
2832 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2833 return mono_emit_method_call (cfg, method, &val, NULL);
2836 alloc = handle_alloc (cfg, klass, TRUE);
2838 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 * Shared-generic-code variant of handle_box: the Nullable Box method is
 * reached through an rgctx fetch + calli, and allocation uses the
 * runtime-supplied DATA_INST instead of a compile-time vtable.
 */
2844 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *rgctx, MonoInst *data_inst)
2846 MonoInst *alloc, *ins;
2848 if (mono_class_is_nullable (klass)) {
2849 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2850 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2851 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2853 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2855 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2857 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 * Emit the castclass check for SRC against KLASS: null passes straight
 * through (branch to is_null_bb); otherwise the object's vtable/class is
 * checked (interface cast, sealed-class fast path, or the generic
 * castclass helper), throwing InvalidCastException on mismatch.
 * With --debug=casts, the from/to classes are recorded in TLS so the
 * runtime can build a better exception message, and cleared afterwards.
 */
2864 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2866 MonoBasicBlock *is_null_bb;
2867 int obj_reg = src->dreg;
2868 int vtable_reg = alloc_preg (cfg);
2870 NEW_BBLOCK (cfg, is_null_bb);
/* null references always pass a castclass. */
2872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2875 if (mini_get_debug_options ()->better_cast_details) {
2876 int to_klass_reg = alloc_preg (cfg);
2877 int klass_reg = alloc_preg (cfg);
2878 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): stray "." after "\n" in this message — fix when the
 * surrounding elided lines can be edited. */
2881 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record source and target class in the JIT TLS data for diagnostics. */
2885 MONO_ADD_INS (cfg->cbb, tls_get);
2886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2890 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2894 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2896 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2898 int klass_reg = alloc_preg (cfg);
2900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class (JIT, non-shared): a single pointer compare
 * against the known vtable/class suffices. */
2902 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2903 /* the remoting code is broken, access the class for now */
2905 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2911 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2914 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2918 MONO_START_BB (cfg, is_null_bb);
2920 /* Reset the variables holding the cast details */
2921 if (mini_get_debug_options ()->better_cast_details) {
2922 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2924 MONO_ADD_INS (cfg->cbb, tls_get);
2925 /* It is enough to reset the from field */
2926 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 * Emit the isinst test for SRC against KLASS. Result register:
 * the object itself when the test succeeds or the input is null
 * (is_null_bb copies obj_reg into res_reg), 0 otherwise (false_bb).
 * Interfaces use the iface-cast helper; arrays check rank + element
 * class (with special handling for enums/object element types and a
 * vector-vs-multidim bounds check for SZARRAY); sealed classes use a
 * direct vtable/class compare; everything else goes through the
 * generic isinst helper.
 */
2933 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2936 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2937 int obj_reg = src->dreg;
2938 int vtable_reg = alloc_preg (cfg);
2939 int res_reg = alloc_preg (cfg);
2941 NEW_BBLOCK (cfg, is_null_bb);
2942 NEW_BBLOCK (cfg, false_bb);
2943 NEW_BBLOCK (cfg, end_bb);
2945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2946 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2948 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2950 /* the is_null_bb target simply copies the input register to the output */
2951 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2953 int klass_reg = alloc_preg (cfg);
2955 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then the element (cast) class is
 * compared. */
2958 int rank_reg = alloc_preg (cfg);
2959 int eclass_reg = alloc_preg (cfg);
2961 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2963 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] accepts any reference element type except enums (which have
 * System.Enum as parent). */
2966 if (klass->cast_class == mono_defaults.object_class) {
2967 int parent_reg = alloc_preg (cfg);
2968 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2969 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2970 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2972 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2973 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2974 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2975 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2976 } else if (klass->cast_class == mono_defaults.enum_class) {
2977 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2978 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2979 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2980 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2982 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2983 /* Check that the object is a vector too */
/* A multidimensional array has a non-NULL bounds pointer. */
2984 int bounds_reg = alloc_preg (cfg);
2985 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2990 /* the is_null_bb target simply copies the input register to the output */
2991 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2993 } else if (mono_class_is_nullable (klass)) {
2994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2995 /* the is_null_bb target simply copies the input register to the output */
2996 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2998 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2999 /* the remoting code is broken, access the class for now */
3001 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3004 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3005 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3011 /* the is_null_bb target simply copies the input register to the output */
3012 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false: result is 0. */
3017 MONO_START_BB (cfg, false_bb);
3019 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
3020 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* success / null input: result is the object itself. */
3022 MONO_START_BB (cfg, is_null_bb);
3024 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3025 ins->type = STACK_OBJ;
3028 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Remoting-aware isinst variant (used for transparent proxies).
 */
3034 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3036 /* This opcode takes as input an object reference and a class, and returns:
3037 0) if the object is an instance of the class,
3038 1) if the object is not an instance of the class,
3039 2) if the object is a proxy whose type cannot be determined */
3042 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3043 int obj_reg = src->dreg;
3044 int dreg = alloc_ireg (cfg);
3046 int klass_reg = alloc_preg (cfg);
3048 NEW_BBLOCK (cfg, true_bb);
3049 NEW_BBLOCK (cfg, false_bb);
3050 NEW_BBLOCK (cfg, false2_bb);
3051 NEW_BBLOCK (cfg, end_bb);
3052 NEW_BBLOCK (cfg, no_proxy_bb);
/* null: not an instance (result 1). */
3054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3057 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3058 NEW_BBLOCK (cfg, interface_fail_bb);
3060 tmp_reg = alloc_preg (cfg);
3061 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3062 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: if the object is a transparent proxy with
 * custom type info, report "cannot determine" (2), else false (1). */
3063 MONO_START_BB (cfg, interface_fail_bb);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3066 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3068 tmp_reg = alloc_preg (cfg);
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: detect a transparent proxy and, if present, test
 * against its remote proxy_class instead of the direct class. */
3073 tmp_reg = alloc_preg (cfg);
3074 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3077 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3078 tmp_reg = alloc_preg (cfg);
3079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3080 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3082 tmp_reg = alloc_preg (cfg);
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3085 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3087 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3088 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3090 MONO_START_BB (cfg, no_proxy_bb);
3092 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results (1, 2, 0) and merge. */
3095 MONO_START_BB (cfg, false_bb);
3097 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3098 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3100 MONO_START_BB (cfg, false2_bb);
3102 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3105 MONO_START_BB (cfg, true_bb);
3107 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3109 MONO_START_BB (cfg, end_bb);
3112 MONO_INST_NEW (cfg, ins, OP_ICONST);
3114 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Remoting-aware castclass variant (used for transparent proxies).
 */
3120 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3122 /* This opcode takes as input an object reference and a class, and returns:
3123 0) if the object is an instance of the class,
3124 1) if the object is a proxy whose type cannot be determined;
3125 an InvalidCastException exception is thrown otherwise */
3128 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3129 int obj_reg = src->dreg;
3130 int dreg = alloc_ireg (cfg);
3131 int tmp_reg = alloc_preg (cfg);
3132 int klass_reg = alloc_preg (cfg);
3134 NEW_BBLOCK (cfg, end_bb);
3135 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes a castclass (result 0). */
3137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3140 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3141 NEW_BBLOCK (cfg, interface_fail_bb);
3143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3144 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info avoids the exception; it yields result 1. */
3145 MONO_START_BB (cfg, interface_fail_bb);
3146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3148 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3150 tmp_reg = alloc_preg (cfg);
3151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3153 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3155 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface: route transparent proxies through their remote
 * proxy_class; everything else through the normal castclass helper. */
3159 NEW_BBLOCK (cfg, no_proxy_bb);
3161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3163 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3165 tmp_reg = alloc_preg (cfg);
3166 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3169 tmp_reg = alloc_preg (cfg);
3170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3174 NEW_BBLOCK (cfg, fail_1_bb);
3176 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3178 MONO_START_BB (cfg, fail_1_bb);
3180 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3181 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3183 MONO_START_BB (cfg, no_proxy_bb);
3185 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3188 MONO_START_BB (cfg, ok_result_bb);
3190 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3192 MONO_START_BB (cfg, end_bb);
3195 MONO_INST_NEW (cfg, ins, OP_ICONST);
3197 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 * Inline the delegate constructor: allocate the delegate object of
 * KLASS, store the target, method, an optional per-domain compiled-code
 * slot, and the delegate invoke trampoline into the corresponding
 * MonoDelegate fields.
 */
3202 static G_GNUC_UNUSED MonoInst*
3203 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3205 gpointer *trampoline;
3206 MonoInst *obj, *method_ins, *tramp_ins;
3210 obj = handle_alloc (cfg, klass, FALSE);
3212 /* Inline the contents of mono_delegate_ctor */
3214 /* Set target field */
3215 /* Optimize away setting of NULL target */
3216 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3217 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3219 /* Set method field */
3220 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3221 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3224 * To avoid looking up the compiled code belonging to the target method
3225 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3226 * store it, and we fill it after the method has been compiled.
/* The code slot cache is skipped for AOT and for dynamic methods. */
3228 if (!cfg->compile_aot && !method->dynamic) {
3229 MonoInst *code_slot_ins;
/* Create (or reuse) the per-domain method -> code-slot mapping under
 * the domain lock. */
3231 domain = mono_domain_get ();
3232 mono_domain_lock (domain);
3233 if (!domain->method_code_hash)
3234 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3235 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3237 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3238 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3240 mono_domain_unlock (domain);
3242 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3243 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3246 /* Set invoke_impl field */
3247 trampoline = mono_create_delegate_trampoline (klass);
3248 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3249 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3251 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit the call that creates a multi-dimensional array of RANK
 * dimensions, going through the vararg array-new icall wrapper
 * (dimension arguments are in SP). Marks the method as using varargs.
 */
3257 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3259 MonoJitICallInfo *info;
3261 /* Need to register the icall so it gets an icall wrapper */
3262 info = mono_get_array_new_va_icall (rank);
3264 cfg->flags |= MONO_CFG_HAS_VARARGS;
3266 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3267 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * If the compile uses a GOT variable and it hasn't been materialized yet,
 * insert an OP_LOAD_GOTADDR at the very start of the entry basic block
 * and a dummy use in the exit block so the variable stays live for the
 * whole method.
 */
3271 mono_emit_load_got_addr (MonoCompile *cfg)
3273 MonoInst *getaddr, *dummy_use;
3275 if (!cfg->got_var || cfg->got_var_allocated)
3278 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3279 getaddr->dreg = cfg->got_var->dreg;
3281 /* Add it to the start of the first bblock */
/* Prepend by hand when the entry block already has code; otherwise a
 * plain append suffices. */
3282 if (cfg->bb_entry->code) {
3283 getaddr->next = cfg->bb_entry->code;
3284 cfg->bb_entry->code = getaddr;
3287 MONO_ADD_INS (cfg->bb_entry, getaddr);
3289 cfg->got_var_allocated = TRUE;
3292 * Add a dummy use to keep the got_var alive, since real uses might
3293 * only be generated by the back ends.
3294 * Add it to end_bblock, so the variable's lifetime covers the whole
3296 * It would be better to make the usage of the got var explicit in all
3297 * cases when the backend needs it (i.e. calls, throw etc.), so this
3298 * wouldn't be needed.
3300 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3301 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: generic-sharing compiles, runtime/internal/noinline/
 * synchronized/pinvoke methods, marshal-by-ref classes, methods with
 * exception clauses or missing headers, bodies over the size limit
 * (MONO_INLINELIMIT env var or INLINE_LENGTH_LIMIT), classes whose
 * cctor would have to run inside the inlined code, methods with
 * declarative security, and (under MONO_ARCH_SOFT_FLOAT) any R4
 * parameter or return.
 */
3305 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3307 MonoMethodHeader *header = mono_method_get_header (method);
3309 #ifdef MONO_ARCH_SOFT_FLOAT
3310 MonoMethodSignature *sig = mono_method_signature (method);
3314 if (cfg->generic_sharing_context)
3317 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, some icalls/pinvokes can be inlined despite the checks
 * below (struct returns and System.Array excluded). */
3318 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3319 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3320 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3324 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3325 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3326 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3327 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3328 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3329 (method->klass->marshalbyref) ||
3330 !header || header->num_clauses)
3333 /* also consider num_locals? */
3334 /* Do the size check early to avoid creating vtables */
3335 if (getenv ("MONO_INLINELIMIT")) {
3336 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3339 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3343 * if we can initialize the class of the method right away, we do,
3344 * otherwise we don't allow inlining if the class needs initialization,
3345 * since it would mean inserting a call to mono_runtime_class_init()
3346 * inside the inlined code
3348 if (!(cfg->opt & MONO_OPT_SHARED)) {
3349 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3350 if (cfg->run_cctors && method->klass->has_cctor) {
3351 if (!method->klass->runtime_info)
3352 /* No vtable created yet */
3354 vtable = mono_class_vtable (cfg->domain, method->klass);
3357 /* This makes so that inline cannot trigger */
3358 /* .cctors: too many apps depend on them */
3359 /* running with a specific order... */
3360 if (! vtable->initialized)
3362 mono_runtime_class_init (vtable);
3364 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3365 if (!method->klass->runtime_info)
3366 /* No vtable created yet */
3368 vtable = mono_class_vtable (cfg->domain, method->klass);
3371 if (!vtable->initialized)
3376 * If we're compiling for shared code
3377 * the cctor will need to be run at aot method load time, for example,
3378 * or at the end of the compilation of the inlining method.
3380 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3385 * CAS - do not inline methods with declarative security
3386 * Note: this has to be before any possible return TRUE;
3388 if (mono_method_has_declsec (method))
3391 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods touching R4 values. */
3393 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3395 for (i = 0; i < sig->param_count; ++i)
3396 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access requires emitting a class-constructor
 * trigger. Returns early (elided lines) when the vtable is already initialized
 * in a non-AOT compile, when the class is BeforeFieldInit, or when no .cctor
 * needs to run at all.
 * NOTE(review): this view is a sampled extract; return statements and braces
 * between the visible lines are elided.
 */
3404 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3406 if (vtable->initialized && !cfg->compile_aot)
3409 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3412 if (!mono_class_needs_cctor_run (vtable->klass, method))
/* Instance methods of the same class: the .cctor has necessarily run before
 * any instance could exist, so the access is safe. */
3415 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3416 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element `index` of the one-dimensional
 * array `arr` whose elements have class `klass`. Emits a bounds check, then
 * either an x86 LEA (power-of-two element sizes) or an explicit
 * multiply/add sequence. Returns the address instruction (STACK_PTR).
 * NOTE(review): sampled extract — #else/#endif lines and some returns elided.
 */
3423 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3427 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3429 mono_class_init (klass);
3430 size = mono_class_array_element_size (klass);
3432 mult_reg = alloc_preg (cfg);
3433 array_reg = arr->dreg;
3434 index_reg = index->dreg;
3436 #if SIZEOF_VOID_P == 8
3437 /* The array reg is 64 bits but the index reg is only 32 */
3438 index2_reg = alloc_preg (cfg);
3439 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3441 index2_reg = index_reg;
3444 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3446 #if defined(__i386__) || defined(__x86_64__)
3447 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 is indexed by element size; -1 entries are unreachable given the
 * size check above. */
3448 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3450 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3451 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3457 add_reg = alloc_preg (cfg);
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3461 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3462 ins->type = STACK_PTR;
3463 MONO_ADD_INS (cfg->cbb, ins);
3468 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [index1, index2] of a
 * two-dimensional array. Loads the MonoArrayBounds pair, range-checks both
 * (lower_bound-adjusted) indices, then computes
 * addr = arr + ((realidx1 * length2) + realidx2) * size + offsetof (vector).
 * Only compiled when the architecture has a native multiply (guard above).
 * NOTE(review): sampled extract — declarations of size/index1/index2/ins and
 * the final return are elided between the visible lines.
 */
3470 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3472 int bounds_reg = alloc_preg (cfg);
3473 int add_reg = alloc_preg (cfg);
3474 int mult_reg = alloc_preg (cfg);
3475 int mult2_reg = alloc_preg (cfg);
3476 int low1_reg = alloc_preg (cfg);
3477 int low2_reg = alloc_preg (cfg);
3478 int high1_reg = alloc_preg (cfg);
3479 int high2_reg = alloc_preg (cfg);
3480 int realidx1_reg = alloc_preg (cfg);
3481 int realidx2_reg = alloc_preg (cfg);
3482 int sum_reg = alloc_preg (cfg);
3487 mono_class_init (klass);
3488 size = mono_class_array_element_size (klass);
3490 index1 = index_ins1->dreg;
3491 index2 = index_ins2->dreg;
3493 /* range checking */
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3495 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound[0]; throw if
 * length[0] <= realidx1 (unsigned compare also catches negative indices). */
3497 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3498 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3499 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3500 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3501 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3503 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry at bounds_reg + sizeof (MonoArrayBounds). */
3505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3506 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3507 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3509 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3511 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major flattening: (realidx1 * length2 + realidx2) * size. */
3513 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3514 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3516 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3517 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3519 ins->type = STACK_MP;
3521 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch helper for Array address computation on the intrinsic Get/Set/
 * Address methods: rank-1 and (optionally) rank-2 arrays get inline address
 * code; higher ranks fall back to a marshalled Address() wrapper call.
 * For setters the trailing value argument is not an index, hence the
 * `- (is_set? 1: 0)` when deriving the rank from the signature.
 * NOTE(review): sampled extract — the rank==1 guard and the final return of
 * `addr` are partially elided.
 */
3528 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3532 MonoMethod *addr_method;
3535 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3538 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3540 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3541 /* emit_ldelema_2 depends on OP_LMUL */
3542 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3543 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3547 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3548 addr_method = mono_marshal_get_array_address (rank, element_size);
3549 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Try to replace a call to a well-known corlib method with inline IR
 * (an "intrinsic"): String get_Chars/get_Length/InternalSetChar,
 * Object GetType/InternalGetHashCode/.ctor, Array get_Rank/get_Length,
 * RuntimeHelpers get_OffsetToStringData, Thread get_CurrentThread/
 * MemoryBarrier, the System.Threading.Interlocked family, Debugger.Break,
 * Environment.get_IsRunningOnWindows, and finally arch-specific intrinsics.
 * Returns the last emitted instruction, or NULL (elided) when the method has
 * no intrinsic expansion and a real call must be made.
 *
 * FIX(review): in the Interlocked.Add branch the stack-type selection
 * compared `opcode` against OP_ATOMIC_ADD_I4, but `opcode` is assigned
 * OP_ATOMIC_ADD_NEW_I4/OP_ATOMIC_ADD_NEW_I8 above — the comparison was
 * always false, typing 32-bit adds as STACK_I8. Now compares against
 * OP_ATOMIC_ADD_NEW_I4, matching the Increment/Decrement branches.
 *
 * NOTE(review): this is a sampled extract — many brace/return/#else lines
 * are elided between the visible lines.
 */
3555 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3557 MonoInst *ins = NULL;
/* Resolved once and cached for all compilations. */
3559 static MonoClass *runtime_helpers_class = NULL;
3560 if (! runtime_helpers_class)
3561 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3562 "System.Runtime.CompilerServices", "RuntimeHelpers");
3564 if (cmethod->klass == mono_defaults.string_class) {
3565 if (strcmp (cmethod->name, "get_Chars") == 0) {
3566 int dreg = alloc_ireg (cfg);
3567 int index_reg = alloc_preg (cfg);
3568 int mult_reg = alloc_preg (cfg);
3569 int add_reg = alloc_preg (cfg);
3571 #if SIZEOF_VOID_P == 8
3572 /* The array reg is 64 bits but the index reg is only 32 */
3573 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3575 index_reg = args [1]->dreg;
3577 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3579 #if defined(__i386__) || defined(__x86_64__)
3580 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3581 add_reg = ins->dreg;
3582 /* Avoid a warning */
3584 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes, so shift index left by 1. */
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3588 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3589 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3590 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3592 type_from_op (ins, NULL, NULL);
3594 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3595 int dreg = alloc_ireg (cfg);
3596 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3597 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3598 type_from_op (ins, NULL, NULL);
3601 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3602 int mult_reg = alloc_preg (cfg);
3603 int add_reg = alloc_preg (cfg);
3605 /* The corlib functions check for oob already. */
3606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3607 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3608 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3611 } else if (cmethod->klass == mono_defaults.object_class) {
/* obj->vtable->type, i.e. Object.GetType() without a call. */
3613 if (strcmp (cmethod->name, "GetType") == 0) {
3614 int dreg = alloc_preg (cfg);
3615 int vt_reg = alloc_preg (cfg);
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3617 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3618 type_from_op (ins, NULL, NULL);
3621 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address (only valid when the GC never moves objects). */
3622 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3623 int dreg = alloc_ireg (cfg);
3624 int t1 = alloc_ireg (cfg);
3626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3627 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3628 ins->type = STACK_I4;
3632 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty: emit a NOP instead of a call. */
3633 MONO_INST_NEW (cfg, ins, OP_NOP);
3634 MONO_ADD_INS (cfg->cbb, ins);
3638 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap first-character filter before the strcmp calls below. */
3639 if (cmethod->name [0] != 'g')
3642 if (strcmp (cmethod->name, "get_Rank") == 0) {
3643 int dreg = alloc_ireg (cfg);
3644 int vtable_reg = alloc_preg (cfg);
3645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3646 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3647 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3648 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3649 type_from_op (ins, NULL, NULL);
3652 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3653 int dreg = alloc_ireg (cfg);
3655 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3656 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3657 type_from_op (ins, NULL, NULL);
3662 } else if (cmethod->klass == runtime_helpers_class) {
3664 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3665 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3669 } else if (cmethod->klass == mono_defaults.thread_class) {
3670 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3671 ins->dreg = alloc_preg (cfg);
3672 ins->type = STACK_OBJ;
3673 MONO_ADD_INS (cfg->cbb, ins);
3675 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3676 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3677 MONO_ADD_INS (cfg->cbb, ins);
3680 } else if (mini_class_is_system_array (cmethod->klass) &&
3681 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3682 MonoInst *addr, *store, *load;
3683 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
/* Inline element copy: load from the element address, store to args[2]. */
3685 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3686 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3687 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3689 } else if (cmethod->klass->image == mono_defaults.corlib &&
3690 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3691 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3694 #if SIZEOF_VOID_P == 8
3695 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3696 /* 64 bit reads are already atomic */
3697 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3698 ins->dreg = mono_alloc_preg (cfg);
3699 ins->inst_basereg = args [0]->dreg;
3700 ins->inst_offset = 0;
3701 MONO_ADD_INS (cfg->cbb, ins);
3705 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3706 if (strcmp (cmethod->name, "Increment") == 0) {
3707 MonoInst *ins_iconst;
3710 if (fsig->params [0]->type == MONO_TYPE_I4)
3711 opcode = OP_ATOMIC_ADD_NEW_I4;
3712 #if SIZEOF_VOID_P == 8
3713 else if (fsig->params [0]->type == MONO_TYPE_I8)
3714 opcode = OP_ATOMIC_ADD_NEW_I8;
3717 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3718 ins_iconst->inst_c0 = 1;
3719 ins_iconst->dreg = mono_alloc_ireg (cfg);
3720 MONO_ADD_INS (cfg->cbb, ins_iconst);
3722 MONO_INST_NEW (cfg, ins, opcode);
3723 ins->dreg = mono_alloc_ireg (cfg);
3724 ins->inst_basereg = args [0]->dreg;
3725 ins->inst_offset = 0;
3726 ins->sreg2 = ins_iconst->dreg;
3727 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3728 MONO_ADD_INS (cfg->cbb, ins);
3730 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3731 MonoInst *ins_iconst;
3734 if (fsig->params [0]->type == MONO_TYPE_I4)
3735 opcode = OP_ATOMIC_ADD_NEW_I4;
3736 #if SIZEOF_VOID_P == 8
3737 else if (fsig->params [0]->type == MONO_TYPE_I8)
3738 opcode = OP_ATOMIC_ADD_NEW_I8;
3741 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3742 ins_iconst->inst_c0 = -1;
3743 ins_iconst->dreg = mono_alloc_ireg (cfg);
3744 MONO_ADD_INS (cfg->cbb, ins_iconst);
3746 MONO_INST_NEW (cfg, ins, opcode);
3747 ins->dreg = mono_alloc_ireg (cfg);
3748 ins->inst_basereg = args [0]->dreg;
3749 ins->inst_offset = 0;
3750 ins->sreg2 = ins_iconst->dreg;
3751 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3752 MONO_ADD_INS (cfg->cbb, ins);
3754 } else if (strcmp (cmethod->name, "Add") == 0) {
3757 if (fsig->params [0]->type == MONO_TYPE_I4)
3758 opcode = OP_ATOMIC_ADD_NEW_I4;
3759 #if SIZEOF_VOID_P == 8
3760 else if (fsig->params [0]->type == MONO_TYPE_I8)
3761 opcode = OP_ATOMIC_ADD_NEW_I8;
3765 MONO_INST_NEW (cfg, ins, opcode);
3766 ins->dreg = mono_alloc_ireg (cfg);
3767 ins->inst_basereg = args [0]->dreg;
3768 ins->inst_offset = 0;
3769 ins->sreg2 = args [1]->dreg;
/* was: (opcode == OP_ATOMIC_ADD_I4) — always false since opcode is one of
 * the _NEW_ variants set above; mistyped I4 adds as STACK_I8. */
3770 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3771 MONO_ADD_INS (cfg->cbb, ins);
3774 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3776 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3777 if (strcmp (cmethod->name, "Exchange") == 0) {
3780 if (fsig->params [0]->type == MONO_TYPE_I4)
3781 opcode = OP_ATOMIC_EXCHANGE_I4;
3782 #if SIZEOF_VOID_P == 8
3783 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3784 (fsig->params [0]->type == MONO_TYPE_I) ||
3785 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3786 opcode = OP_ATOMIC_EXCHANGE_I8;
3788 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3789 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3790 opcode = OP_ATOMIC_EXCHANGE_I4;
3795 MONO_INST_NEW (cfg, ins, opcode);
3796 ins->dreg = mono_alloc_ireg (cfg);
3797 ins->inst_basereg = args [0]->dreg;
3798 ins->inst_offset = 0;
3799 ins->sreg2 = args [1]->dreg;
3800 MONO_ADD_INS (cfg->cbb, ins);
3802 switch (fsig->params [0]->type) {
3804 ins->type = STACK_I4;
3808 ins->type = STACK_I8;
3810 case MONO_TYPE_OBJECT:
3811 ins->type = STACK_OBJ;
3814 g_assert_not_reached ();
3817 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3819 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3821 * Can't implement CompareExchange methods this way since they have
3822 * three arguments. We can implement one of the common cases, where the new
3823 * value is a constant.
3825 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3826 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3827 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3828 ins->dreg = alloc_ireg (cfg);
3829 ins->sreg1 = args [0]->dreg;
3830 ins->sreg2 = args [1]->dreg;
3831 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3832 ins->type = STACK_I4;
3833 MONO_ADD_INS (cfg->cbb, ins);
3835 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3837 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3841 } else if (cmethod->klass->image == mono_defaults.corlib) {
3842 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3843 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3844 MONO_INST_NEW (cfg, ins, OP_BREAK);
3845 MONO_ADD_INS (cfg->cbb, ins);
3848 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3849 && strcmp (cmethod->klass->name, "Environment") == 0) {
3850 #ifdef PLATFORM_WIN32
3851 EMIT_NEW_ICONST (cfg, ins, 1);
3853 EMIT_NEW_ICONST (cfg, ins, 0);
3857 } else if (cmethod->klass == mono_defaults.math_class) {
3859 * There is general branches code for Min/Max, but it does not work for
3861 * http://everything2.com/?node_id=1051618
/* Last resort: give the backend a chance to emit an arch intrinsic. */
3865 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3869 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect selected internal calls to managed replacements. Currently only
 * String.InternalAllocateStr is redirected, to the GC's managed string
 * allocator when one is available. Returns the emitted call instruction on
 * redirection, NULL otherwise (elided).
 */
3872 inline static MonoInst*
3873 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3874 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3876 if (method->klass == mono_defaults.string_class) {
3877 /* managed string allocation support */
3878 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3879 MonoInst *iargs [2];
3880 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3881 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* The managed allocator takes (vtable, length). */
3884 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3885 iargs [1] = args [0];
3886 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * Store each call argument from the evaluation stack `sp` into a freshly
 * created local variable, so inlined code can refer to stable locals
 * instead of stack slots. For instance calls the first slot is `this`,
 * whose type is derived from the stack entry rather than the signature.
 */
3893 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp, MonoInst **args)
3895 MonoInst *store, *temp;
3898 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3899 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3902 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3903 * would be different than the MonoInst's used to represent arguments, and
3904 * the ldelema implementation can't deal with that.
3905 * Solution: When ldelema is used on an inline argument, create a var for
3906 * it, emit ldelema on that var, and emit the saving code below in
3907 * inline_method () if needed.
3909 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3911 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, *sp);
3912 store->cil_code = sp [0]->cil_code;
/* Debug switches: when enabled, inlining can be restricted to methods whose
 * full name matches an env-var prefix (see the two checkers below). */
3917 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3918 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3920 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug filter: only allow inlining of callees whose full name starts with
 * the prefix in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. With the env var
 * unset, `limit` is cached as "" and the filter is a no-op.
 */
3922 check_inline_called_method_name_limit (MonoMethod *called_method)
3925 static char *limit = NULL;
3927 if (limit == NULL) {
3928 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3930 if (limit_string != NULL)
3931 limit = limit_string;
3933 limit = (char *) "";
3936 if (limit [0] != '\0') {
3937 char *called_method_name = mono_method_full_name (called_method, TRUE);
3939 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3940 g_free (called_method_name);
3942 //return (strncmp_result <= 0);
3943 return (strncmp_result == 0);
3950 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Debug filter mirroring check_inline_called_method_name_limit, but keyed on
 * the CALLER's full name and the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 */
3952 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3955 static char *limit = NULL;
3957 if (limit == NULL) {
3958 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3959 if (limit_string != NULL) {
3960 limit = limit_string;
3962 limit = (char *) "";
3966 if (limit [0] != '\0') {
3967 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3969 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3970 g_free (caller_method_name);
3972 //return (strncmp_result <= 0);
3973 return (strncmp_result == 0);
/*
 * inline_method:
 * Attempt to inline `cmethod` at the current IR position. Saves the pieces of
 * compilation state that mono_method_to_ir2 mutates, converts the callee's
 * IL between fresh start/end bblocks, and either stitches the result into
 * the caller's CFG (cost below threshold, or inline_allways) or unwinds.
 * Returns the inline cost on success, 0 on abort (returns elided in this
 * sampled view).
 * NOTE(review): the save/restore of cfg fields below must stay exactly
 * paired; do not reorder.
 */
3981 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3982 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3984 MonoInst *ins, *rvar = NULL;
3985 MonoMethodHeader *cheader;
3986 MonoBasicBlock *ebblock, *sbblock;
3988 MonoMethod *prev_inlined_method;
3989 MonoInst **prev_locals, **prev_args;
3990 MonoType **prev_arg_types;
3991 guint prev_real_offset;
3992 GHashTable *prev_cbb_hash;
3993 MonoBasicBlock **prev_cil_offset_to_bb;
3994 MonoBasicBlock *prev_cbb;
3995 unsigned char* prev_cil_start;
3996 guint32 prev_cil_offset_to_bb_len;
3997 MonoMethod *prev_current_method;
3998 MonoGenericContext *prev_generic_context;
4000 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4002 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4003 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4006 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4007 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4011 if (cfg->verbose_level > 2)
4012 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4014 if (!cmethod->inline_info) {
4015 mono_jit_stats.inlineable_methods++;
4016 cmethod->inline_info = 1;
4018 /* allocate space to store the return value */
4019 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4020 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4023 /* allocate local variables */
4024 cheader = mono_method_get_header (cmethod);
4025 prev_locals = cfg->locals;
4026 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4027 for (i = 0; i < cheader->num_locals; ++i)
4028 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4030 /* allocate start and end blocks */
4031 /* This is needed so if the inline is aborted, we can clean up */
4032 NEW_BBLOCK (cfg, sbblock);
4033 sbblock->real_offset = real_offset;
4035 NEW_BBLOCK (cfg, ebblock);
4036 ebblock->block_num = cfg->num_bblocks++;
4037 ebblock->real_offset = real_offset;
/* --- save compilation state that mono_method_to_ir2 will overwrite --- */
4039 prev_args = cfg->args;
4040 prev_arg_types = cfg->arg_types;
4041 prev_inlined_method = cfg->inlined_method;
4042 cfg->inlined_method = cmethod;
4043 cfg->ret_var_set = FALSE;
4044 prev_real_offset = cfg->real_offset;
4045 prev_cbb_hash = cfg->cbb_hash;
4046 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4047 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4048 prev_cil_start = cfg->cil_start;
4049 prev_cbb = cfg->cbb;
4050 prev_current_method = cfg->current_method;
4051 prev_generic_context = cfg->generic_context;
/* Recursively convert the callee's IL; costs < 0 means conversion failed. */
4053 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
/* --- restore saved state --- */
4055 cfg->inlined_method = prev_inlined_method;
4056 cfg->real_offset = prev_real_offset;
4057 cfg->cbb_hash = prev_cbb_hash;
4058 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4059 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4060 cfg->cil_start = prev_cil_start;
4061 cfg->locals = prev_locals;
4062 cfg->args = prev_args;
4063 cfg->arg_types = prev_arg_types;
4064 cfg->current_method = prev_current_method;
4065 cfg->generic_context = prev_generic_context;
/* Cost threshold 60 — presumably tuned empirically; TODO confirm rationale. */
4067 if ((costs >= 0 && costs < 60) || inline_allways) {
4068 if (cfg->verbose_level > 2)
4069 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4071 mono_jit_stats.inlined_methods++;
4073 /* always add some code to avoid block split failures */
4074 MONO_INST_NEW (cfg, ins, OP_NOP);
4075 MONO_ADD_INS (prev_cbb, ins);
4077 prev_cbb->next_bb = sbblock;
4078 link_bblock (cfg, prev_cbb, sbblock);
4081 * Get rid of the begin and end bblocks if possible to aid local
4084 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4086 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4087 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4089 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4090 MonoBasicBlock *prev = ebblock->in_bb [0];
4091 mono_merge_basic_blocks (cfg, prev, ebblock);
4099 * If the inlined method contains only a throw, then the ret var is not
4100 * set, so set it to a dummy value.
4102 if (!cfg->ret_var_set) {
4103 static double r8_0 = 0.0;
4105 switch (rvar->type) {
4107 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4110 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4115 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4118 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4119 ins->type = STACK_R8;
4120 ins->inst_p0 = (void*)&r8_0;
4121 ins->dreg = rvar->dreg;
4122 MONO_ADD_INS (cfg->cbb, ins);
4125 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4128 g_assert_not_reached ();
4132 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: clear any exception set during the failed attempt. */
4137 if (cfg->verbose_level > 2)
4138 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4139 cfg->exception_type = MONO_EXCEPTION_NONE;
4140 mono_loader_clear_error ();
4142 /* This gets rid of the newly added bblocks */
4143 cfg->cbb = prev_cbb;
4149 * Some of these comments may well be out-of-date.
4150 * Design decisions: we do a single pass over the IL code (and we do bblock
4151 * splitting/merging in the few cases when it's required: a back jump to an IL
4152 * address that was not already seen as bblock starting point).
4153 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4154 * Complex operations are decomposed in simpler ones right away. We need to let the
4155 * arch-specific code peek and poke inside this process somehow (except when the
4156 * optimizations can take advantage of the full semantic info of coarse opcodes).
4157 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4158 * MonoInst->opcode initially is the IL opcode or some simplification of that
4159 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4160 * opcode with value bigger than OP_LAST.
4161 * At this point the IR can be handed over to an interpreter, a dumb code generator
4162 * or to the optimizing code generator that will translate it to SSA form.
4164 * Profiling directed optimizations.
4165 * We may compile by default with few or no optimizations and instrument the code
4166 * or the user may indicate what methods to optimize the most either in a config file
4167 * or through repeated runs where the compiler applies offline the optimizations to
4168 * each method and then decides if it was worth it.
/* IL-verification helper macros used by the main conversion loop: each
 * bails out through UNVERIFIED (or load_error) when the IL is malformed. */
4171 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4172 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4173 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* Unsigned casts make a negative index fail the same test as a too-large one. */
4174 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4175 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4176 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4177 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4178 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4180 /* offset from br.s -> br like opcodes */
4181 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * TRUE when the IL address `ip` belongs to bblock `bb`, i.e. no other
 * bblock starts at that offset in the cil_offset_to_bb map.
 */
4184 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4186 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4188 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL in [start, end): create a basic block at every
 * branch target and at every instruction following a branch/switch, so the
 * main conversion pass can assume the block structure already exists.
 * Blocks containing a `throw` are marked out_of_line (cold).
 * NOTE(review): sampled extract — the enclosing loop, several case labels
 * and the `unverified:` exit are elided between the visible lines.
 */
4192 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4194 unsigned char *ip = start;
4195 unsigned char *target;
4198 MonoBasicBlock *bblock;
4199 const MonoOpcode *opcode;
4202 cli_addr = ip - start;
4203 i = mono_opcode_value ((const guint8 **)&ip, end);
4206 opcode = &mono_opcodes [i];
/* Advance by operand size; only branch-like operands create bblocks. */
4207 switch (opcode->argument) {
4208 case MonoInlineNone:
4211 case MonoInlineString:
4212 case MonoInlineType:
4213 case MonoInlineField:
4214 case MonoInlineMethod:
4217 case MonoShortInlineR:
4224 case MonoShortInlineVar:
4225 case MonoShortInlineI:
4228 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement relative to the next opcode. */
4229 target = start + cli_addr + 2 + (signed char)ip [1];
4230 GET_BBLOCK (cfg, bblock, target);
4233 GET_BBLOCK (cfg, bblock, ip);
4235 case MonoInlineBrTarget:
/* Long branch: 4-byte signed displacement relative to the next opcode. */
4236 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4237 GET_BBLOCK (cfg, bblock, target);
4240 GET_BBLOCK (cfg, bblock, ip);
4242 case MonoInlineSwitch: {
4243 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
4246 cli_addr += 5 + 4 * n;
4247 target = start + cli_addr;
4248 GET_BBLOCK (cfg, bblock, target);
4250 for (j = 0; j < n; ++j) {
4251 target = start + cli_addr + (gint32)read32 (ip);
4252 GET_BBLOCK (cfg, bblock, target);
4262 g_assert_not_reached ();
4265 if (i == CEE_THROW) {
4266 unsigned char *bb_start = ip - 1;
4268 /* Find the start of the bblock containing the throw */
4270 while ((bb_start >= start) && !bblock) {
4271 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4275 bblock->out_of_line = 1;
4284 static inline MonoMethod *
/*
 * mini_get_method_allow_open:
 * Resolve a method token. For wrapper methods the token indexes the
 * wrapper's own data table; otherwise it is resolved against the enclosing
 * image with the given generic context. "Open" constructed types are
 * permitted (contrast mini_get_method below).
 */
4285 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4289 if (m->wrapper_type != MONO_WRAPPER_NONE)
4290 return mono_method_get_wrapper_data (m, token);
4292 method = mono_get_method_full (m->klass->image, token, klass, context);
4297 static inline MonoMethod *
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, rejects (elided: presumably NULLs out — confirm) methods whose
 * class is still an open constructed type.
 */
4298 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4300 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4302 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4308 static inline MonoClass*
/*
 * mini_get_class:
 * Resolve a type token to a MonoClass, honoring wrapper data tables, and
 * ensure the class is initialized before returning it.
 */
4309 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4313 if (method->wrapper_type != MONO_WRAPPER_NONE)
4314 klass = mono_method_get_wrapper_data (method, token);
4316 klass = mono_class_get_full (method->klass->image, token, context);
4318 mono_class_init (klass);
4323 * Returns TRUE if the JIT should abort inlining because "callee"
4324 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS link demands for a caller->callee edge. For an ECMA link
 * demand, emits code throwing a SecurityException before the call; for
 * other failures records the result on the cfg so compilation can surface
 * a MONO_EXCEPTION_SECURITY_LINKDEMAND later.
 */
4327 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4331 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4335 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4336 if (result == MONO_JIT_SECURITY_OK)
4339 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4340 /* Generate code to throw a SecurityException before the actual call/link */
4341 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* NOTE(review): the constant 4 presumably encodes the demand kind expected
 * by linkdemandsecurityexception — confirm against SecurityManager. */
4344 NEW_ICONST (cfg, args [0], 4);
4345 NEW_METHODCONST (cfg, args [1], caller);
4346 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4347 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4348 /* don't hide previous results */
4349 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4350 cfg->exception_data = result;
/*
 * method_access_exception:
 * Return (cached in a static) the SecurityManager.MethodAccessException
 * helper method used to raise method-access violations at runtime.
 */
4358 method_access_exception (void)
4360 static MonoMethod *method = NULL;
4363 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4364 method = mono_class_get_method_from_name (secman->securitymanager,
4365 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit a call to the SecurityManager helper that throws a
 * MethodAccessException for the given caller/callee pair.
 */
4372 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4373 MonoBasicBlock *bblock, unsigned char *ip)
4375 MonoMethod *thrower = method_access_exception ();
4378 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4379 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4380 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 * Return (cached in a static) the SecurityManager.VerificationException
 * helper method.
 */
4384 verification_exception (void)
4386 static MonoMethod *method = NULL;
4389 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4390 method = mono_class_get_method_from_name (secman->securitymanager,
4391 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 * Emit a call that throws a VerificationException at runtime.
 */
4398 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4400 MonoMethod *thrower = verification_exception ();
4402 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: a call is allowed when the caller's transparency level
 * is >= the callee's, or either side is SafeCritical. Otherwise (elided
 * lines likely refine is_safe) emit code throwing MethodAccessException.
 */
4406 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4407 MonoBasicBlock *bblock, unsigned char *ip)
4409 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4410 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4411 gboolean is_safe = TRUE;
4413 if (!(caller_level >= callee_level ||
4414 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4415 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4420 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 * Test hook: flags a method literally named "unsafeMethod" (used by the
 * CoreCLR security test suite, presumably — confirm against callers).
 */
4424 method_is_safe (MonoMethod *method)
4427 if (strcmp (method->name, "unsafeMethod") == 0)
4434 * Check that the IL instructions at ip are the array initialization
4435 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Pattern-match the `dup; ldtoken <field>; call RuntimeHelpers::
 * InitializeArray` idiom that follows a newarr, and if the element type is
 * byte-order-safe, return a pointer to the static RVA data so the array can
 * be initialized with a memcpy instead of a call. For AOT the RVA itself is
 * returned (looked up at load time). Returns NULL when the pattern or the
 * element type does not qualify.
 * NOTE(review): sampled extract — the per-type `size` assignments in the
 * switch and several early-return guards are elided.
 */
4438 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4441 * newarr[System.Int32]
4443 * ldtoken field valuetype ...
4444 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token table (field token). */
4446 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4447 guint32 token = read32 (ip + 7);
4448 guint32 field_token = read32 (ip + 2);
4449 guint32 field_index = field_token & 0xffffff;
4451 const char *data_ptr;
4453 MonoMethod *cmethod;
4454 MonoClass *dummy_class;
4455 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4461 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4464 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4466 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4467 case MONO_TYPE_BOOLEAN:
4471 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4472 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4473 case MONO_TYPE_CHAR:
4483 return NULL; /* stupid ARM FP swapped format */
/* Reject if the computed blob size exceeds the RVA field's declared size. */
4493 if (size > mono_type_size (field->type, &dummy_align))
4496 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4497 field_index = read32 (ip + 2) & 0xffffff;
4498 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4499 data_ptr = mono_image_rva_map (method->klass->image, rva);
4500 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4501 /* for aot code we do the lookup on load */
4502 if (aot && data_ptr)
4503 return GUINT_TO_POINTER (rva);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the MonoCompile, building a message
 * that names the offending method and disassembles the IL instruction at ip.
 */
4510 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4512 char *method_fname = mono_method_full_name (method, TRUE);
/* An empty body cannot be disassembled -- substitute a fixed message */
4515 if (mono_method_get_header (method)->code_size == 0)
4516 method_code = g_strdup ("method body is empty.");
4518 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4519 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4520 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* g_strdup_printf copied both strings, so the temporaries can be freed */
4521 g_free (method_fname);
4522 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-built exception object on the MonoCompile.  cfg->exception_ptr
 * is registered as a GC root first so the object is kept alive by the
 * collector until the compilation aborts with it.
 */
4526 set_exception_object (MonoCompile *cfg, MonoException *exception)
4528 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4529 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4530 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type.  Under generic sharing the
 * type is first normalized through the sharing context so generic type
 * variables resolve to their basic representation before the test.
 */
4534 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4538 if (cfg->generic_sharing_context)
4539 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4541 type = &klass->byval_arg;
4542 return MONO_TYPE_IS_REFERENCE (type);
4546 * mono_decompose_array_access_opts:
4548 * Decompose array access opcodes.
/*
 * Lowers high-level array opcodes (length loads, bounds checks, newarr) into
 * concrete loads/calls.  NOTE(review): several case labels and break
 * statements are elided from this view.
 */
4551 mono_decompose_array_access_opts (MonoCompile *cfg)
4553 MonoBasicBlock *bb, *first_bb;
4556 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4557 * can be executed anytime. It should be run before decompose_long
4561 * Create a dummy bblock and emit code into it so we can use the normal
4562 * code generation macros.
4564 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4565 first_bb = cfg->cbb;
4567 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4569 MonoInst *prev = NULL;
4571 MonoInst *iargs [3];
/* Fast path: skip bblocks that contain no array-access opcodes at all */
4574 if (!bb->has_array_access)
4577 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the dummy bblock for this round of emission */
4579 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4585 for (ins = bb->code; ins; ins = ins->next) {
4586 switch (ins->opcode) {
/* (ldlen case, label elided): array length is an i4 load of
 * MonoArray::max_length from the array pointer */
4588 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4589 G_STRUCT_OFFSET (MonoArray, max_length));
4590 MONO_ADD_INS (cfg->cbb, dest);
4592 case OP_BOUNDS_CHECK:
/* Bounds checks are arch-specific; delegate to the backend macro */
4593 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* (newarr case, label elided): shared/generic code cannot bake in a
 * vtable, so call mono_array_new (domain, class, len) instead */
4596 if (cfg->opt & MONO_OPT_SHARED) {
4597 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4598 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4599 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4600 iargs [2]->dreg = ins->sreg1;
4602 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4603 dest->dreg = ins->dreg;
/* Non-shared path: the exact vtable is known, so use the faster
 * mono_array_new_specific (vtable, len) icall */
4605 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4608 NEW_VTABLECONST (cfg, iargs [0], vtable);
4609 MONO_ADD_INS (cfg->cbb, iargs [0]);
4610 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4611 iargs [1]->dreg = ins->sreg1;
4613 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4614 dest->dreg = ins->dreg;
/* The macros above must have emitted into the dummy bblock only */
4621 g_assert (cfg->cbb == first_bb);
4623 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4624 /* Replace the original instruction with the new code sequence */
4626 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4627 first_bb->code = first_bb->last_ins = NULL;
4628 first_bb->in_count = first_bb->out_count = 0;
4629 cfg->cbb = first_bb;
4636 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4646 #ifdef MONO_ARCH_SOFT_FLOAT
4649 * mono_handle_soft_float:
4651 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4652 * similar to long support on 32 bit platforms. 32 bit float values require special
4653 * handling when used as locals, arguments, and in calls.
4654 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * NOTE(review): this view is elided -- many case labels, breaks and variable
 * declarations (d, dest, ins, addr_reg, conv, size, ...) are missing.  The
 * comments below annotate only the visible transformations.
 */
4657 mono_handle_soft_float (MonoCompile *cfg)
4659 MonoBasicBlock *bb, *first_bb;
4662 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4666 * Create a dummy bblock and emit code into it so we can use the normal
4667 * code generation macros.
4669 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4670 first_bb = cfg->cbb;
4672 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4674 MonoInst *prev = NULL;
4677 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4679 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4685 for (ins = bb->code; ins; ins = ins->next) {
4686 const char *spec = INS_INFO (ins->opcode);
4688 /* Most fp operations are handled automatically by opcode emulation */
4690 switch (ins->opcode) {
/* (r8const case, label elided): reinterpret the double's bit pattern as an
 * i8 constant via the d union -- presumably { double vald; gint64 vall; } */
4693 d.vald = *(double*)ins->inst_p0;
4694 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4699 /* We load the r8 value */
/* (r4const case, label elided): widen the float to double, then emit its
 * bits as an i8 constant like the r8 case */
4700 d.vald = *(float*)ins->inst_p0;
4701 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long (vreg-pair) or int moves */
4705 ins->opcode = OP_LMOVE;
/* (+1 / +2 below address the two halves of the double's vreg pair) */
4708 ins->opcode = OP_MOVE;
4709 ins->sreg1 = ins->sreg1 + 1;
4712 ins->opcode = OP_MOVE;
4713 ins->sreg1 = ins->sreg1 + 2;
/* (setfret case, label elided): return the double as a long in two regs */
4716 int reg = ins->sreg1;
4718 ins->opcode = OP_SETLRET;
4720 ins->sreg1 = reg + 1;
4721 ins->sreg2 = reg + 2;
/* r8 memory accesses map 1:1 onto i8 accesses */
4724 case OP_LOADR8_MEMBASE:
4725 ins->opcode = OP_LOADI8_MEMBASE;
4727 case OP_STORER8_MEMBASE_REG:
4728 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need a narrowing r8->r4 conversion: done in the
 * mono_fstore_r4 icall, which takes the value and the target address */
4730 case OP_STORER4_MEMBASE_REG: {
4731 MonoInst *iargs [2];
4734 /* Arg 1 is the double value */
4735 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4736 iargs [0]->dreg = ins->sreg1;
4738 /* Arg 2 is the address to store to */
4739 addr_reg = mono_alloc_preg (cfg);
4740 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4741 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads go through mono_fload_r4, which widens to r8 */
4745 case OP_LOADR4_MEMBASE: {
4746 MonoInst *iargs [1];
4750 addr_reg = mono_alloc_preg (cfg);
4751 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4752 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4753 conv->dreg = ins->dreg;
/* fp-returning calls: the result arrives in integer registers */
4758 case OP_FCALL_MEMBASE: {
4759 MonoCallInst *call = (MonoCallInst*)ins;
4760 if (call->signature->ret->type == MONO_TYPE_R4) {
4761 MonoCallInst *call2;
4762 MonoInst *iargs [1];
4765 /* Convert the call into a call returning an int */
4766 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4767 memcpy (call2, call, sizeof (MonoCallInst));
4768 switch (ins->opcode) {
4770 call2->inst.opcode = OP_CALL;
4773 call2->inst.opcode = OP_CALL_REG;
4775 case OP_FCALL_MEMBASE:
4776 call2->inst.opcode = OP_CALL_MEMBASE;
4779 g_assert_not_reached ();
4781 call2->inst.dreg = mono_alloc_ireg (cfg);
4782 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4784 /* FIXME: Optimize this */
4786 /* Emit an r4->r8 conversion */
/* Take the address of the int result and widen it with mono_fload_r4 */
4787 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4788 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4789 conv->dreg = ins->dreg;
/* r8-returning calls just become long-returning calls (vreg pair) */
4791 switch (ins->opcode) {
4793 ins->opcode = OP_LCALL;
4796 ins->opcode = OP_LCALL_REG;
4798 case OP_FCALL_MEMBASE:
4799 ins->opcode = OP_LCALL_MEMBASE;
4802 g_assert_not_reached ();
/* (fcompare case, label elided) */
4808 MonoJitICallInfo *info;
4809 MonoInst *iargs [2];
4810 MonoInst *call, *cmp, *br;
4812 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall for the *following* branch opcode does the actual
 * fp comparison and returns an int */
4814 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4817 /* Create dummy MonoInst's for the arguments */
4818 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4819 iargs [0]->dreg = ins->sreg1;
4820 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4821 iargs [1]->dreg = ins->sreg2;
4823 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4825 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4826 cmp->sreg1 = call->dreg;
4828 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on the icall's int result, reusing the original targets */
4830 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4831 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4832 br->inst_true_bb = ins->next->inst_true_bb;
4833 br->inst_false_bb = ins->next->inst_false_bb;
4834 MONO_ADD_INS (cfg->cbb, br);
4836 /* The call sequence might include fp ins */
4839 /* Skip fbcc or fccc */
4840 NULLIFY_INS (ins->next);
/* (fccc case, label elided) */
4848 MonoJitICallInfo *info;
4849 MonoInst *iargs [2];
4852 /* Convert fccc to icall+icompare+iceq */
4854 info = mono_find_jit_opcode_emulation (ins->opcode);
4857 /* Create dummy MonoInst's for the arguments */
4858 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4859 iargs [0]->dreg = ins->sreg1;
4860 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4861 iargs [1]->dreg = ins->sreg2;
4863 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Turn the icall's int result into the 0/1 the cc opcode produced */
4865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4866 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4868 /* The call sequence might include fp ins */
/* Any instruction still touching an fp vreg after the switch is a bug */
4873 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4874 mono_print_ins (ins);
4875 g_assert_not_reached ();
4880 g_assert (cfg->cbb == first_bb);
4882 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4883 /* Replace the original instruction with the new code sequence */
4885 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4886 first_bb->code = first_bb->last_ins = NULL;
4887 first_bb->in_count = first_bb->out_count = 0;
4888 cfg->cbb = first_bb;
4895 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes; lower them immediately */
4898 mono_decompose_long_opts (cfg);
4904 * mono_method_to_ir: translates IL into basic blocks containing trees
4907 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4908 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4909 guint inline_offset, gboolean is_virtual_call)
4911 MonoInst *ins, **sp, **stack_start;
4912 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4913 MonoMethod *cmethod, *method_definition;
4914 MonoInst **arg_array;
4915 MonoMethodHeader *header;
4917 guint32 token, ins_flag;
4919 MonoClass *constrained_call = NULL;
4920 unsigned char *ip, *end, *target, *err_pos;
4921 static double r8_0 = 0.0;
4922 MonoMethodSignature *sig;
4923 MonoGenericContext *generic_context = NULL;
4924 MonoGenericContainer *generic_container = NULL;
4925 MonoType **param_types;
4926 GList *bb_recheck = NULL, *tmp;
4927 int i, n, start_new_bblock, dreg;
4928 int num_calls = 0, inline_costs = 0;
4929 int breakpoint_id = 0;
4931 MonoBoolean security, pinvoke;
4932 MonoSecurityManager* secman = NULL;
4933 MonoDeclSecurityActions actions;
4934 GSList *class_inits = NULL;
4935 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4938 /* serialization and xdomain stuff may need access to private fields and methods */
4939 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4940 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4941 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4942 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4943 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4944 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4946 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4948 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4949 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4950 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4951 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4953 image = method->klass->image;
4954 header = mono_method_get_header (method);
4955 generic_container = mono_method_get_generic_container (method);
4956 sig = mono_method_signature (method);
4957 num_args = sig->hasthis + sig->param_count;
4958 ip = (unsigned char*)header->code;
4959 cfg->cil_start = ip;
4960 end = ip + header->code_size;
4961 mono_jit_stats.cil_code_size += header->code_size;
4963 method_definition = method;
4964 while (method_definition->is_inflated) {
4965 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4966 method_definition = imethod->declaring;
4969 /* SkipVerification is not allowed if core-clr is enabled */
4970 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4972 dont_verify_stloc = TRUE;
4975 if (!dont_verify && mini_method_verify (cfg, method_definition))
4976 goto exception_exit;
4978 if (sig->is_inflated)
4979 generic_context = mono_method_get_context (method);
4980 else if (generic_container)
4981 generic_context = &generic_container->context;
4982 cfg->generic_context = generic_context;
4984 if (!cfg->generic_sharing_context)
4985 g_assert (!sig->has_type_parameters);
4987 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4988 g_assert (method->is_inflated);
4989 g_assert (mono_method_get_context (method)->method_inst);
4991 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4992 g_assert (sig->generic_param_count);
4994 if (cfg->method == method) {
4995 cfg->real_offset = 0;
4997 cfg->real_offset = inline_offset;
5000 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5001 cfg->cil_offset_to_bb_len = header->code_size;
5003 cfg->current_method = method;
5005 if (cfg->verbose_level > 2)
5006 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5008 dont_inline = g_list_prepend (dont_inline, method);
5009 if (cfg->method == method) {
5011 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5012 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5015 NEW_BBLOCK (cfg, start_bblock);
5016 cfg->bb_entry = start_bblock;
5017 start_bblock->cil_code = NULL;
5018 start_bblock->cil_length = 0;
5021 NEW_BBLOCK (cfg, end_bblock);
5022 cfg->bb_exit = end_bblock;
5023 end_bblock->cil_code = NULL;
5024 end_bblock->cil_length = 0;
5025 g_assert (cfg->num_bblocks == 2);
5027 arg_array = cfg->args;
5029 if (header->num_clauses) {
5030 cfg->spvars = g_hash_table_new (NULL, NULL);
5031 cfg->exvars = g_hash_table_new (NULL, NULL);
5033 /* handle exception clauses */
5034 for (i = 0; i < header->num_clauses; ++i) {
5035 MonoBasicBlock *try_bb;
5036 MonoExceptionClause *clause = &header->clauses [i];
5037 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5038 try_bb->real_offset = clause->try_offset;
5039 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5040 tblock->real_offset = clause->handler_offset;
5041 tblock->flags |= BB_EXCEPTION_HANDLER;
5043 link_bblock (cfg, try_bb, tblock);
5045 if (*(ip + clause->handler_offset) == CEE_POP)
5046 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5048 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5049 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5050 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5051 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5052 MONO_ADD_INS (tblock, ins);
5054 /* todo: is a fault block unsafe to optimize? */
5055 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5056 tblock->flags |= BB_EXCEPTION_UNSAFE;
5060 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5062 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5064 /* catch and filter blocks get the exception object on the stack */
5065 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5066 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5067 MonoInst *dummy_use;
5069 /* mostly like handle_stack_args (), but just sets the input args */
5070 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5071 tblock->in_scount = 1;
5072 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5073 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5076 * Add a dummy use for the exvar so its liveness info will be
5080 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5082 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5083 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5084 tblock->real_offset = clause->data.filter_offset;
5085 tblock->in_scount = 1;
5086 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5087 /* The filter block shares the exvar with the handler block */
5088 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5089 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5090 MONO_ADD_INS (tblock, ins);
5094 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5095 clause->data.catch_class &&
5096 cfg->generic_sharing_context &&
5097 mono_class_check_context_used (clause->data.catch_class)) {
5098 if (mono_method_get_context (method)->method_inst)
5099 GENERIC_SHARING_FAILURE (CEE_NOP);
5102 * In shared generic code with catch
5103 * clauses containing type variables
5104 * the exception handling code has to
5105 * be able to get to the rgctx.
5106 * Therefore we have to make sure that
5107 * the vtable/mrgctx argument (for
5108 * static or generic methods) or the
5109 * "this" argument (for non-static
5110 * methods) are live.
5112 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5113 mini_method_get_context (method)->method_inst) {
5114 mono_get_vtable_var (cfg);
5116 MonoInst *dummy_use;
5118 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5123 arg_array = alloca (sizeof (MonoInst *) * num_args);
5124 cfg->cbb = start_bblock;
5125 mono_save_args (cfg, sig, inline_args, arg_array);
5126 cfg->args = arg_array;
5129 /* FIRST CODE BLOCK */
5130 NEW_BBLOCK (cfg, bblock);
5131 bblock->cil_code = ip;
5135 ADD_BBLOCK (cfg, bblock);
5137 if (cfg->method == method) {
5138 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5139 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5140 MONO_INST_NEW (cfg, ins, OP_BREAK);
5141 MONO_ADD_INS (bblock, ins);
5145 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5146 secman = mono_security_manager_get_methods ();
5148 security = (secman && mono_method_has_declsec (method));
5149 /* at this point having security doesn't mean we have any code to generate */
5150 if (security && (cfg->method == method)) {
5151 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5152 * And we do not want to enter the next section (with allocation) if we
5153 * have nothing to generate */
5154 security = mono_declsec_get_demands (method, &actions);
5157 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5158 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5160 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5161 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5162 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5164 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5165 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5169 mono_custom_attrs_free (custom);
5172 custom = mono_custom_attrs_from_class (wrapped->klass);
5173 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5177 mono_custom_attrs_free (custom);
5180 /* not a P/Invoke after all */
5185 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5186 /* we use a separate basic block for the initialization code */
5187 NEW_BBLOCK (cfg, init_localsbb);
5188 cfg->bb_init = init_localsbb;
5189 init_localsbb->real_offset = cfg->real_offset;
5190 start_bblock->next_bb = init_localsbb;
5191 init_localsbb->next_bb = bblock;
5192 link_bblock (cfg, start_bblock, init_localsbb);
5193 link_bblock (cfg, init_localsbb, bblock);
5195 cfg->cbb = init_localsbb;
5197 start_bblock->next_bb = bblock;
5198 link_bblock (cfg, start_bblock, bblock);
5201 /* at this point we know, if security is TRUE, that some code needs to be generated */
5202 if (security && (cfg->method == method)) {
5205 mono_jit_stats.cas_demand_generation++;
5207 if (actions.demand.blob) {
5208 /* Add code for SecurityAction.Demand */
5209 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5210 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5211 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5212 mono_emit_method_call (cfg, secman->demand, args, NULL);
5214 if (actions.noncasdemand.blob) {
5215 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5216 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5217 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5218 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5219 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5220 mono_emit_method_call (cfg, secman->demand, args, NULL);
5222 if (actions.demandchoice.blob) {
5223 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5224 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5225 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5226 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5227 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5231 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5233 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5236 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5237 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5238 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5239 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5240 if (!(method->klass && method->klass->image &&
5241 mono_security_core_clr_is_platform_image (method->klass->image))) {
5242 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5246 if (!method_is_safe (method))
5247 emit_throw_verification_exception (cfg, bblock, ip);
5250 if (header->code_size == 0)
5253 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5258 if (cfg->method == method)
5259 mono_debug_init_method (cfg, bblock, breakpoint_id);
5261 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5263 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5264 for (n = 0; n < sig->param_count; ++n)
5265 param_types [n + sig->hasthis] = sig->params [n];
5266 cfg->arg_types = param_types;
5267 for (n = 0; n < header->num_locals; ++n) {
5268 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5273 /* add a check for this != NULL to inlined methods */
5274 if (is_virtual_call) {
5277 NEW_ARGLOAD (cfg, arg_ins, 0);
5278 MONO_ADD_INS (cfg->cbb, arg_ins);
5279 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5280 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5281 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5284 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5285 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5288 start_new_bblock = 0;
5292 if (cfg->method == method)
5293 cfg->real_offset = ip - header->code;
5295 cfg->real_offset = inline_offset;
5300 if (start_new_bblock) {
5301 bblock->cil_length = ip - bblock->cil_code;
5302 if (start_new_bblock == 2) {
5303 g_assert (ip == tblock->cil_code);
5305 GET_BBLOCK (cfg, tblock, ip);
5307 bblock->next_bb = tblock;
5310 start_new_bblock = 0;
5311 for (i = 0; i < bblock->in_scount; ++i) {
5312 if (cfg->verbose_level > 3)
5313 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5314 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5318 g_slist_free (class_inits);
5321 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5322 link_bblock (cfg, bblock, tblock);
5323 if (sp != stack_start) {
5324 handle_stack_args (cfg, stack_start, sp - stack_start);
5326 CHECK_UNVERIFIABLE (cfg);
5328 bblock->next_bb = tblock;
5331 for (i = 0; i < bblock->in_scount; ++i) {
5332 if (cfg->verbose_level > 3)
5333 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5334 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5337 g_slist_free (class_inits);
5342 bblock->real_offset = cfg->real_offset;
5344 if ((cfg->method == method) && cfg->coverage_info) {
5345 guint32 cil_offset = ip - header->code;
5346 cfg->coverage_info->data [cil_offset].cil_code = ip;
5348 /* TODO: Use an increment here */
5349 #if defined(__i386__)
5350 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5351 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5353 MONO_ADD_INS (cfg->cbb, ins);
5355 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5356 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5360 if (cfg->verbose_level > 3)
5361 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5366 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5368 MONO_ADD_INS (bblock, ins);
5374 CHECK_STACK_OVF (1);
5375 n = (*ip)-CEE_LDARG_0;
5377 EMIT_NEW_ARGLOAD (cfg, ins, n);
5385 CHECK_STACK_OVF (1);
5386 n = (*ip)-CEE_LDLOC_0;
5388 EMIT_NEW_LOCLOAD (cfg, ins, n);
5399 n = (*ip)-CEE_STLOC_0;
5402 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5405 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5406 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5407 /* Optimize reg-reg moves away */
5409 * Can't optimize other opcodes, since sp[0] might point to
5410 * the last ins of a decomposed opcode.
5412 sp [0]->dreg = (cfg)->locals [n]->dreg;
5414 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5422 CHECK_STACK_OVF (1);
5425 EMIT_NEW_ARGLOAD (cfg, ins, n);
5431 CHECK_STACK_OVF (1);
5434 NEW_ARGLOADA (cfg, ins, n);
5435 MONO_ADD_INS (cfg->cbb, ins);
5445 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5447 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5452 CHECK_STACK_OVF (1);
5455 EMIT_NEW_LOCLOAD (cfg, ins, n);
5461 CHECK_STACK_OVF (1);
5462 CHECK_LOCAL (ip [1]);
5465 * ldloca inhibits many optimizations so try to get rid of it in common
5468 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5469 gboolean skip = FALSE;
5471 /* From the INITOBJ case */
5472 token = read32 (ip + 4);
5473 klass = mini_get_class (method, token, generic_context);
5474 CHECK_TYPELOAD (klass);
5475 if (generic_class_is_reference_type (cfg, klass)) {
5476 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5477 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5478 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5479 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5480 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5492 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5500 CHECK_LOCAL (ip [1]);
5501 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5503 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5508 CHECK_STACK_OVF (1);
5509 EMIT_NEW_PCONST (cfg, ins, NULL);
5510 ins->type = STACK_OBJ;
5515 CHECK_STACK_OVF (1);
5516 EMIT_NEW_ICONST (cfg, ins, -1);
5529 CHECK_STACK_OVF (1);
5530 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5536 CHECK_STACK_OVF (1);
5538 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5544 CHECK_STACK_OVF (1);
5545 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5551 CHECK_STACK_OVF (1);
5552 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5553 ins->type = STACK_I8;
5554 ins->dreg = alloc_dreg (cfg, STACK_I8);
5556 ins->inst_l = (gint64)read64 (ip);
5557 MONO_ADD_INS (bblock, ins);
5563 /* FIXME: we should really allocate this only late in the compilation process */
5564 mono_domain_lock (cfg->domain);
5565 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5566 mono_domain_unlock (cfg->domain);
5568 CHECK_STACK_OVF (1);
5569 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5570 ins->type = STACK_R8;
5571 ins->dreg = alloc_dreg (cfg, STACK_R8);
5575 MONO_ADD_INS (bblock, ins);
5583 /* FIXME: we should really allocate this only late in the compilation process */
5584 mono_domain_lock (cfg->domain);
5585 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5586 mono_domain_unlock (cfg->domain);
5588 CHECK_STACK_OVF (1);
5589 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5590 ins->type = STACK_R8;
5591 ins->dreg = alloc_dreg (cfg, STACK_R8);
5595 MONO_ADD_INS (bblock, ins);
5602 MonoInst *temp, *store;
5604 CHECK_STACK_OVF (1);
5608 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5609 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5611 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5614 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5627 if (sp [0]->type == STACK_R8)
5628 /* we need to pop the value from the x86 FP stack */
5629 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5636 if (stack_start != sp)
5638 token = read32 (ip + 1);
5639 /* FIXME: check the signature matches */
5640 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5645 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5646 GENERIC_SHARING_FAILURE (CEE_JMP);
5648 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5649 if (check_linkdemand (cfg, method, cmethod))
5651 CHECK_CFG_EXCEPTION;
5656 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5659 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5661 /* Handle tail calls similarly to calls */
5662 n = fsig->param_count + fsig->hasthis;
5664 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5665 call->method = cmethod;
5666 call->tail_call = TRUE;
5667 call->signature = mono_method_signature (cmethod);
5668 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5669 call->inst.inst_p0 = cmethod;
5670 for (i = 0; i < n; ++i)
5671 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5673 mono_arch_emit_call (cfg, call);
5674 MONO_ADD_INS (bblock, (MonoInst*)call);
5677 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5678 ins = (MonoInst*)call;
5679 ins->inst_p0 = cmethod;
5680 MONO_ADD_INS (bblock, ins);
5684 start_new_bblock = 1;
5689 case CEE_CALLVIRT: {
5690 MonoInst *addr = NULL;
5691 MonoMethodSignature *fsig = NULL;
5693 int virtual = *ip == CEE_CALLVIRT;
5694 int calli = *ip == CEE_CALLI;
5695 gboolean pass_imt_from_rgctx = FALSE;
5696 MonoInst *imt_arg = NULL;
5697 gboolean pass_vtable = FALSE;
5698 gboolean pass_mrgctx = FALSE;
5699 MonoInst *vtable_arg = NULL;
5700 gboolean check_this = FALSE;
5703 token = read32 (ip + 1);
5710 if (method->wrapper_type != MONO_WRAPPER_NONE)
5711 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5713 fsig = mono_metadata_parse_signature (image, token);
5715 n = fsig->param_count + fsig->hasthis;
5717 MonoMethod *cil_method;
5719 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5720 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5721 cil_method = cmethod;
5722 } else if (constrained_call) {
5723 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5725 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5726 cil_method = cmethod;
5731 if (!dont_verify && !cfg->skip_visibility) {
5732 MonoMethod *target_method = cil_method;
5733 if (method->is_inflated) {
5734 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5736 if (!mono_method_can_access_method (method_definition, target_method) &&
5737 !mono_method_can_access_method (method, cil_method))
5738 METHOD_ACCESS_FAILURE;
5741 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5742 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5744 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5745 /* MS.NET seems to silently convert this to a callvirt */
5748 if (!cmethod->klass->inited)
5749 if (!mono_class_init (cmethod->klass))
5752 if (mono_method_signature (cmethod)->pinvoke) {
5753 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
5754 fsig = mono_method_signature (wrapper);
5755 } else if (constrained_call) {
5756 fsig = mono_method_signature (cmethod);
5758 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5761 mono_save_token_info (cfg, image, token, cmethod);
5763 n = fsig->param_count + fsig->hasthis;
5765 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5766 if (check_linkdemand (cfg, method, cmethod))
5768 CHECK_CFG_EXCEPTION;
5771 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5772 mini_class_is_system_array (cmethod->klass)) {
5773 array_rank = cmethod->klass->rank;
5776 if (cmethod->string_ctor)
5777 g_assert_not_reached ();
5780 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5783 if (!cfg->generic_sharing_context && cmethod)
5784 g_assert (!mono_method_check_context_used (cmethod));
5788 //g_assert (!virtual || fsig->hasthis);
5792 if (constrained_call) {
5794 * We have the `constrained.' prefix opcode.
5796 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5800 * The type parameter is instantiated as a valuetype,
5801 * but that type doesn't override the method we're
5802 * calling, so we need to box `this'.
5804 dreg = alloc_dreg (cfg, STACK_VTYPE);
5805 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5806 ins->klass = constrained_call;
5807 sp [0] = handle_box (cfg, ins, constrained_call);
5808 } else if (!constrained_call->valuetype) {
5809 int dreg = alloc_preg (cfg);
5812 * The type parameter is instantiated as a reference
5813 * type. We have a managed pointer on the stack, so
5814 * we need to dereference it here.
5816 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5817 ins->type = STACK_OBJ;
5819 } else if (cmethod->klass->valuetype)
5821 constrained_call = NULL;
5824 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5828 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5829 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5830 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5831 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5832 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5835 * Pass vtable iff target method might
5836 * be shared, which means that sharing
5837 * is enabled for its class and its
5838 * context is sharable (and it's not a
5841 if (sharing_enabled && context_sharable &&
5842 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5846 if (cmethod && mini_method_get_context (cmethod) &&
5847 mini_method_get_context (cmethod)->method_inst) {
5848 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5849 MonoGenericContext *context = mini_method_get_context (cmethod);
5850 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5852 g_assert (!pass_vtable);
5854 if (sharing_enabled && context_sharable)
5858 if (cfg->generic_sharing_context && cmethod) {
5859 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5861 context_used = mono_method_check_context_used (cmethod);
5863 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5864 /* Generic method interface
5865 calls are resolved via a
5866 helper function and don't
5868 if (!cmethod_context || !cmethod_context->method_inst)
5869 pass_imt_from_rgctx = TRUE;
5873 * If a shared method calls another
5874 * shared method then the caller must
5875 * have a generic sharing context
5876 * because the magic trampoline
5877 * requires it. FIXME: We shouldn't
5878 * have to force the vtable/mrgctx
5879 * variable here. Instead there
5880 * should be a flag in the cfg to
5881 * request a generic sharing context.
5883 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5884 mono_get_vtable_var (cfg);
5891 EMIT_GET_RGCTX (rgctx, context_used);
5892 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5894 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5896 CHECK_TYPELOAD (cmethod->klass);
5897 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5902 g_assert (!vtable_arg);
5907 EMIT_GET_RGCTX (rgctx, context_used);
5908 vtable_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5910 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5913 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5914 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5921 if (pass_imt_from_rgctx) {
5924 g_assert (!pass_vtable);
5927 EMIT_GET_RGCTX (rgctx, context_used);
5928 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5929 MONO_RGCTX_INFO_METHOD);
5935 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5936 check->sreg1 = sp [0]->dreg;
5937 MONO_ADD_INS (cfg->cbb, check);
5940 /* Calling virtual generic methods */
5941 if (cmethod && virtual &&
5942 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5943 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5944 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5945 mono_method_signature (cmethod)->generic_param_count) {
5946 MonoInst *this_temp, *this_arg_temp, *store;
5947 MonoInst *iargs [4];
5949 g_assert (mono_method_signature (cmethod)->is_inflated);
5951 /* Prevent inlining of methods that contain indirect calls */
5954 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5955 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5956 MONO_ADD_INS (bblock, store);
5958 /* FIXME: This should be a managed pointer */
5959 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5961 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5965 EMIT_GET_RGCTX (rgctx, context_used);
5966 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5967 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5968 addr = mono_emit_jit_icall (cfg,
5969 mono_helper_compile_generic_method, iargs);
5971 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5972 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5973 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5976 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5978 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5979 if (!MONO_TYPE_IS_VOID (fsig->ret))
5988 /* FIXME: runtime generic context pointer for jumps? */
5989 /* FIXME: handle this for generic sharing eventually */
5990 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5991 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5994 /* FIXME: runtime generic context pointer for jumps? */
5995 GENERIC_SHARING_FAILURE (*ip);
5997 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6000 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6001 call->tail_call = TRUE;
6002 call->method = cmethod;
6003 call->signature = mono_method_signature (cmethod);
6006 /* Handle tail calls similarly to calls */
6007 call->inst.opcode = OP_TAILCALL;
6009 mono_arch_emit_call (cfg, call);
6012 * We implement tail calls by storing the actual arguments into the
6013 * argument variables, then emitting a CEE_JMP.
6015 for (i = 0; i < n; ++i) {
6016 /* Prevent argument from being register allocated */
6017 arg_array [i]->flags |= MONO_INST_VOLATILE;
6018 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6022 ins = (MonoInst*)call;
6023 ins->inst_p0 = cmethod;
6024 ins->inst_p1 = arg_array [0];
6025 MONO_ADD_INS (bblock, ins);
6026 link_bblock (cfg, bblock, end_bblock);
6027 start_new_bblock = 1;
6028 /* skip CEE_RET as well */
6034 /* Conversion to a JIT intrinsic */
6035 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6036 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6037 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6048 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6049 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6050 mono_method_check_inlining (cfg, cmethod) &&
6051 !g_list_find (dont_inline, cmethod)) {
6053 gboolean allways = FALSE;
6055 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6056 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6057 /* Prevent inlining of methods that call wrappers */
6059 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6063 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6065 cfg->real_offset += 5;
6068 if (!MONO_TYPE_IS_VOID (fsig->ret))
6069 /* *sp is already set by inline_method */
6072 inline_costs += costs;
6078 inline_costs += 10 * num_calls++;
6080 /* Tail recursion elimination */
6081 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6082 gboolean has_vtargs = FALSE;
6085 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6088 /* keep it simple */
6089 for (i = fsig->param_count - 1; i >= 0; i--) {
6090 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6095 for (i = 0; i < n; ++i)
6096 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6097 MONO_INST_NEW (cfg, ins, OP_BR);
6098 MONO_ADD_INS (bblock, ins);
6099 tblock = start_bblock->out_bb [0];
6100 link_bblock (cfg, bblock, tblock);
6101 ins->inst_target_bb = tblock;
6102 start_new_bblock = 1;
6104 /* skip the CEE_RET, too */
6105 if (ip_in_bb (cfg, bblock, ip + 5))
6115 /* Generic sharing */
6116 /* FIXME: only do this for generic methods if
6117 they are not shared! */
6119 (cmethod->klass->valuetype ||
6120 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6121 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6122 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6123 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6124 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6125 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6130 g_assert (cfg->generic_sharing_context && cmethod);
6134 * We are compiling a call to a
6135 * generic method from shared code,
6136 * which means that we have to look up
6137 * the method in the rgctx and do an
6141 EMIT_GET_RGCTX (rgctx, context_used);
6142 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6145 /* Indirect calls */
6147 g_assert (!imt_arg);
6149 if (*ip == CEE_CALL)
6150 g_assert (context_used);
6151 else if (*ip == CEE_CALLI)
6152 g_assert (!vtable_arg);
6154 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6155 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6157 /* Prevent inlining of methods with indirect calls */
6161 #ifdef MONO_ARCH_RGCTX_REG
6163 int rgctx_reg = mono_alloc_preg (cfg);
6165 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6166 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6167 call = (MonoCallInst*)ins;
6168 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6170 GENERIC_SHARING_FAILURE (*ip);
6173 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6175 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6176 if (fsig->pinvoke && !fsig->ret->byref) {
6180 * Native code might return non register sized integers
6181 * without initializing the upper bits.
6183 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6184 case OP_LOADI1_MEMBASE:
6185 widen_op = OP_ICONV_TO_I1;
6187 case OP_LOADU1_MEMBASE:
6188 widen_op = OP_ICONV_TO_U1;
6190 case OP_LOADI2_MEMBASE:
6191 widen_op = OP_ICONV_TO_I2;
6193 case OP_LOADU2_MEMBASE:
6194 widen_op = OP_ICONV_TO_U2;
6200 if (widen_op != -1) {
6201 int dreg = alloc_preg (cfg);
6204 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6205 widen->type = ins->type;
6222 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6223 if (sp [fsig->param_count]->type == STACK_OBJ) {
6224 MonoInst *iargs [2];
6227 iargs [1] = sp [fsig->param_count];
6229 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6232 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6233 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6234 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6235 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6237 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6240 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6241 if (!cmethod->klass->element_class->valuetype && !readonly)
6242 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6245 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6248 g_assert_not_reached ();
6256 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6258 if (!MONO_TYPE_IS_VOID (fsig->ret))
6269 #ifdef MONO_ARCH_RGCTX_REG
6271 int rgctx_reg = mono_alloc_preg (cfg);
6273 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6274 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6275 call = (MonoCallInst*)ins;
6276 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6278 GENERIC_SHARING_FAILURE (*ip);
6280 } else if (imt_arg) {
6281 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6283 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6286 if (!MONO_TYPE_IS_VOID (fsig->ret))
6294 if (cfg->method != method) {
6295 /* return from inlined method */
6297 * If in_count == 0, that means the ret is unreachable due to
6298 * being preceeded by a throw. In that case, inline_method () will
6299 * handle setting the return value
6300 * (test case: test_0_inline_throw ()).
6302 if (return_var && cfg->cbb->in_count) {
6306 //g_assert (returnvar != -1);
6307 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6308 cfg->ret_var_set = TRUE;
6312 MonoType *ret_type = mono_method_signature (method)->ret;
6314 g_assert (!return_var);
6317 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6320 if (!cfg->vret_addr) {
6323 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6325 EMIT_NEW_RETLOADA (cfg, ret_addr);
6327 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6328 ins->klass = mono_class_from_mono_type (ret_type);
6331 #ifdef MONO_ARCH_SOFT_FLOAT
6332 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6333 MonoInst *iargs [1];
6337 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6338 mono_arch_emit_setret (cfg, method, conv);
6340 mono_arch_emit_setret (cfg, method, *sp);
6343 mono_arch_emit_setret (cfg, method, *sp);
6348 if (sp != stack_start)
6350 MONO_INST_NEW (cfg, ins, OP_BR);
6352 ins->inst_target_bb = end_bblock;
6353 MONO_ADD_INS (bblock, ins);
6354 link_bblock (cfg, bblock, end_bblock);
6355 start_new_bblock = 1;
6359 MONO_INST_NEW (cfg, ins, OP_BR);
6361 target = ip + 1 + (signed char)(*ip);
6363 GET_BBLOCK (cfg, tblock, target);
6364 link_bblock (cfg, bblock, tblock);
6365 CHECK_BBLOCK (target, ip, tblock);
6366 ins->inst_target_bb = tblock;
6367 if (sp != stack_start) {
6368 handle_stack_args (cfg, stack_start, sp - stack_start);
6370 CHECK_UNVERIFIABLE (cfg);
6372 MONO_ADD_INS (bblock, ins);
6373 start_new_bblock = 1;
6374 inline_costs += BRANCH_COST;
6388 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6390 target = ip + 1 + *(signed char*)ip;
6396 inline_costs += BRANCH_COST;
6400 MONO_INST_NEW (cfg, ins, OP_BR);
6403 target = ip + 4 + (gint32)read32(ip);
6405 GET_BBLOCK (cfg, tblock, target);
6406 link_bblock (cfg, bblock, tblock);
6407 CHECK_BBLOCK (target, ip, tblock);
6408 ins->inst_target_bb = tblock;
6409 if (sp != stack_start) {
6410 handle_stack_args (cfg, stack_start, sp - stack_start);
6412 CHECK_UNVERIFIABLE (cfg);
6415 MONO_ADD_INS (bblock, ins);
6417 start_new_bblock = 1;
6418 inline_costs += BRANCH_COST;
6425 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6426 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6427 guint32 opsize = is_short ? 1 : 4;
6429 CHECK_OPSIZE (opsize);
6431 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6434 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6439 GET_BBLOCK (cfg, tblock, target);
6440 link_bblock (cfg, bblock, tblock);
6441 CHECK_BBLOCK (target, ip, tblock);
6442 GET_BBLOCK (cfg, tblock, ip);
6443 link_bblock (cfg, bblock, tblock);
6445 if (sp != stack_start) {
6446 handle_stack_args (cfg, stack_start, sp - stack_start);
6447 CHECK_UNVERIFIABLE (cfg);
6450 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6451 cmp->sreg1 = sp [0]->dreg;
6452 type_from_op (cmp, sp [0], NULL);
6455 #if SIZEOF_VOID_P == 4
6456 if (cmp->opcode == OP_LCOMPARE_IMM) {
6457 /* Convert it to OP_LCOMPARE */
6458 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6459 ins->type = STACK_I8;
6460 ins->dreg = alloc_dreg (cfg, STACK_I8);
6462 MONO_ADD_INS (bblock, ins);
6463 cmp->opcode = OP_LCOMPARE;
6464 cmp->sreg2 = ins->dreg;
6467 MONO_ADD_INS (bblock, cmp);
6469 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6470 type_from_op (ins, sp [0], NULL);
6471 MONO_ADD_INS (bblock, ins);
6472 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6473 GET_BBLOCK (cfg, tblock, target);
6474 ins->inst_true_bb = tblock;
6475 GET_BBLOCK (cfg, tblock, ip);
6476 ins->inst_false_bb = tblock;
6477 start_new_bblock = 2;
6480 inline_costs += BRANCH_COST;
6495 MONO_INST_NEW (cfg, ins, *ip);
6497 target = ip + 4 + (gint32)read32(ip);
6503 inline_costs += BRANCH_COST;
6507 MonoBasicBlock **targets;
6508 MonoBasicBlock *default_bblock;
6509 MonoJumpInfoBBTable *table;
6511 int offset_reg = alloc_preg (cfg);
6512 int target_reg = alloc_preg (cfg);
6513 int table_reg = alloc_preg (cfg);
6514 int sum_reg = alloc_preg (cfg);
6519 n = read32 (ip + 1);
6522 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6526 CHECK_OPSIZE (n * sizeof (guint32));
6527 target = ip + n * sizeof (guint32);
6529 GET_BBLOCK (cfg, default_bblock, target);
6531 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6532 for (i = 0; i < n; ++i) {
6533 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6534 targets [i] = tblock;
6538 if (sp != stack_start) {
6540 * Link the current bb with the targets as well, so handle_stack_args
6541 * will set their in_stack correctly.
6543 link_bblock (cfg, bblock, default_bblock);
6544 for (i = 0; i < n; ++i)
6545 link_bblock (cfg, bblock, targets [i]);
6547 handle_stack_args (cfg, stack_start, sp - stack_start);
6549 CHECK_UNVERIFIABLE (cfg);
6552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6556 for (i = 0; i < n; ++i)
6557 link_bblock (cfg, bblock, targets [i]);
6559 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6560 table->table = targets;
6561 table->table_size = n;
6564 /* ARM implements SWITCH statements differently */
6565 /* FIXME: Make it use the generic implementation */
6566 /* the backend code will deal with aot vs normal case */
6567 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6568 ins->sreg1 = src1->dreg;
6569 ins->inst_p0 = table;
6570 ins->inst_many_bb = targets;
6571 ins->klass = GUINT_TO_POINTER (n);
6572 MONO_ADD_INS (cfg->cbb, ins);
6574 if (sizeof (gpointer) == 8)
6575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6579 #if SIZEOF_VOID_P == 8
6580 /* The upper word might not be zero, and we add it to a 64 bit address later */
6581 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6584 if (cfg->compile_aot) {
6585 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6587 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6588 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6589 ins->inst_p0 = table;
6590 ins->dreg = table_reg;
6591 MONO_ADD_INS (cfg->cbb, ins);
6594 /* FIXME: Use load_memindex */
6595 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6597 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6599 start_new_bblock = 1;
6600 inline_costs += (BRANCH_COST * 2);
6620 dreg = alloc_freg (cfg);
6623 dreg = alloc_lreg (cfg);
6626 dreg = alloc_preg (cfg);
6629 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6630 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6631 ins->flags |= ins_flag;
6633 MONO_ADD_INS (bblock, ins);
6648 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6649 ins->flags |= ins_flag;
6651 MONO_ADD_INS (bblock, ins);
6659 MONO_INST_NEW (cfg, ins, (*ip));
6661 ins->sreg1 = sp [0]->dreg;
6662 ins->sreg2 = sp [1]->dreg;
6663 type_from_op (ins, sp [0], sp [1]);
6665 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6667 /* Use the immediate opcodes if possible */
6668 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6669 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6670 if (imm_opcode != -1) {
6671 ins->opcode = imm_opcode;
6672 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6675 sp [1]->opcode = OP_NOP;
6679 MONO_ADD_INS ((cfg)->cbb, (ins));
6682 mono_decompose_opcode (cfg, ins);
6699 MONO_INST_NEW (cfg, ins, (*ip));
6701 ins->sreg1 = sp [0]->dreg;
6702 ins->sreg2 = sp [1]->dreg;
6703 type_from_op (ins, sp [0], sp [1]);
6705 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6706 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6708 /* FIXME: Pass opcode to is_inst_imm */
6710 /* Use the immediate opcodes if possible */
6711 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6714 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6715 if (imm_opcode != -1) {
6716 ins->opcode = imm_opcode;
6717 if (sp [1]->opcode == OP_I8CONST) {
6718 #if SIZEOF_VOID_P == 8
6719 ins->inst_imm = sp [1]->inst_l;
6721 ins->inst_ls_word = sp [1]->inst_ls_word;
6722 ins->inst_ms_word = sp [1]->inst_ms_word;
6726 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6729 sp [1]->opcode = OP_NOP;
6732 MONO_ADD_INS ((cfg)->cbb, (ins));
6735 mono_decompose_opcode (cfg, ins);
6748 case CEE_CONV_OVF_I8:
6749 case CEE_CONV_OVF_U8:
6753 /* Special case this earlier so we have long constants in the IR */
6754 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6755 int data = sp [-1]->inst_c0;
6756 sp [-1]->opcode = OP_I8CONST;
6757 sp [-1]->type = STACK_I8;
6758 #if SIZEOF_VOID_P == 8
6759 if ((*ip) == CEE_CONV_U8)
6760 sp [-1]->inst_c0 = (guint32)data;
6762 sp [-1]->inst_c0 = data;
6764 sp [-1]->inst_ls_word = data;
6765 if ((*ip) == CEE_CONV_U8)
6766 sp [-1]->inst_ms_word = 0;
6768 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6770 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6777 case CEE_CONV_OVF_I4:
6778 case CEE_CONV_OVF_I1:
6779 case CEE_CONV_OVF_I2:
6780 case CEE_CONV_OVF_I:
6781 case CEE_CONV_OVF_U:
6784 if (sp [-1]->type == STACK_R8) {
6785 ADD_UNOP (CEE_CONV_OVF_I8);
6792 case CEE_CONV_OVF_U1:
6793 case CEE_CONV_OVF_U2:
6794 case CEE_CONV_OVF_U4:
6797 if (sp [-1]->type == STACK_R8) {
6798 ADD_UNOP (CEE_CONV_OVF_U8);
6805 case CEE_CONV_OVF_I1_UN:
6806 case CEE_CONV_OVF_I2_UN:
6807 case CEE_CONV_OVF_I4_UN:
6808 case CEE_CONV_OVF_I8_UN:
6809 case CEE_CONV_OVF_U1_UN:
6810 case CEE_CONV_OVF_U2_UN:
6811 case CEE_CONV_OVF_U4_UN:
6812 case CEE_CONV_OVF_U8_UN:
6813 case CEE_CONV_OVF_I_UN:
6814 case CEE_CONV_OVF_U_UN:
6824 case CEE_ADD_OVF_UN:
6826 case CEE_MUL_OVF_UN:
6828 case CEE_SUB_OVF_UN:
6836 token = read32 (ip + 1);
6837 klass = mini_get_class (method, token, generic_context);
6838 CHECK_TYPELOAD (klass);
6840 if (generic_class_is_reference_type (cfg, klass)) {
6841 MonoInst *store, *load;
6842 int dreg = alloc_preg (cfg);
6844 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6845 load->flags |= ins_flag;
6846 MONO_ADD_INS (cfg->cbb, load);
6848 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6849 store->flags |= ins_flag;
6850 MONO_ADD_INS (cfg->cbb, store);
6852 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6864 token = read32 (ip + 1);
6865 klass = mini_get_class (method, token, generic_context);
6866 CHECK_TYPELOAD (klass);
6868 /* Optimize the common ldobj+stloc combination */
6878 loc_index = ip [5] - CEE_STLOC_0;
6885 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6886 CHECK_LOCAL (loc_index);
6888 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6889 ins->dreg = cfg->locals [loc_index]->dreg;
6895 /* Optimize the ldobj+stobj combination */
6896 /* The reference case ends up being a load+store anyway */
6897 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6902 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6909 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6918 CHECK_STACK_OVF (1);
6920 n = read32 (ip + 1);
6922 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6923 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6924 ins->type = STACK_OBJ;
6927 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6928 MonoInst *iargs [1];
6930 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6931 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6933 if (cfg->opt & MONO_OPT_SHARED) {
6934 MonoInst *iargs [3];
6936 if (cfg->compile_aot) {
6937 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6939 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6940 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6941 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6942 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6943 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6945 if (bblock->out_of_line) {
6946 MonoInst *iargs [2];
6948 if (cfg->method->klass->image == mono_defaults.corlib) {
6950 * Avoid relocations in AOT and save some space by using a
6951 * version of helper_ldstr specialized to mscorlib.
6953 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6954 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6956 /* Avoid creating the string object */
6957 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6958 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6959 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6963 if (cfg->compile_aot) {
6964 NEW_LDSTRCONST (cfg, ins, image, n);
6966 MONO_ADD_INS (bblock, ins);
6969 NEW_PCONST (cfg, ins, NULL);
6970 ins->type = STACK_OBJ;
6971 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6973 MONO_ADD_INS (bblock, ins);
6982 MonoInst *iargs [2];
6983 MonoMethodSignature *fsig;
6988 token = read32 (ip + 1);
6989 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6992 fsig = mono_method_get_signature (cmethod, image, token);
6994 mono_save_token_info (cfg, image, token, cmethod);
6996 if (!mono_class_init (cmethod->klass))
6999 if (cfg->generic_sharing_context)
7000 context_used = mono_method_check_context_used (cmethod);
7002 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7003 if (check_linkdemand (cfg, method, cmethod))
7005 CHECK_CFG_EXCEPTION;
7006 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7007 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7010 n = fsig->param_count;
7014 * Generate smaller code for the common newobj <exception> instruction in
7015 * argument checking code.
7017 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7018 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7019 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7020 MonoInst *iargs [3];
7024 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7027 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7031 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7036 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7039 g_assert_not_reached ();
/*
 * CEE_NEWOBJ handling: allocate the object (or pick a specialized path for
 * System.Array / string ctors / valuetypes), then invoke the constructor,
 * inlining it when profitable.  Interior lines are elided in this extract.
 */
7047 /* move the args to allow room for 'this' in the first position */
7053 /* check_call_signature () requires sp[0] to be set */
7054 this_ins.type = STACK_OBJ;
7056 if (check_call_signature (cfg, fsig, sp))
/* System.Array ctors: turned into array-new icalls, never a real call */
7061 if (mini_class_is_system_array (cmethod->klass)) {
7062 g_assert (!context_used);
7063 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7065 /* Avoid varargs in the common case */
7066 if (fsig->param_count == 1)
7067 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7068 else if (fsig->param_count == 2)
7069 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7071 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7072 } else if (cmethod->string_ctor) {
7073 g_assert (!context_used);
7074 /* we simply pass a null pointer */
7075 EMIT_NEW_PCONST (cfg, *sp, NULL);
7076 /* now call the string ctor */
7077 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7079 MonoInst* callvirt_this_arg = NULL;
/* valuetype "new": zero a stack temp and pass its address as 'this' */
7081 if (cmethod->klass->valuetype) {
7082 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7083 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7084 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7089 * The code generated by mini_emit_virtual_call () expects
7090 * iargs [0] to be a boxed instance, but luckily the vcall
7091 * will be transformed into a normal call there.
/* generic sharing: fetch the klass/vtable through the RGCTX at runtime */
7093 } else if (context_used) {
7094 MonoInst *rgctx, *data;
7097 EMIT_GET_RGCTX (rgctx, context_used);
7098 if (cfg->opt & MONO_OPT_SHARED)
7099 rgctx_info = MONO_RGCTX_INFO_KLASS;
7101 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7102 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7104 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7107 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7109 CHECK_TYPELOAD (cmethod->klass);
7112 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7113 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7114 * As a workaround, we call class cctors before allocating objects.
7116 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7117 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7118 if (cfg->verbose_level > 2)
7119 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
/* remember we already emitted the cctor call for this vtable */
7120 class_inits = g_slist_prepend (class_inits, vtable);
7123 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7128 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7130 /* Now call the actual ctor */
7131 /* Avoid virtual calls to ctors if possible */
7132 if (cmethod->klass->marshalbyref)
7133 callvirt_this_arg = sp [0];
/* try to inline the ctor body; exception-class ctors are excluded */
7135 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7136 mono_method_check_inlining (cfg, cmethod) &&
7137 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7138 !g_list_find (dont_inline, cmethod)) {
7141 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7142 cfg->real_offset += 5;
7145 inline_costs += costs - 5;
7148 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
/* shared-generic ctor that can't be called directly: indirect call
 * through the method address obtained from the RGCTX */
7150 } else if (context_used &&
7151 (cmethod->klass->valuetype ||
7152 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7153 MonoInst *rgctx, *cmethod_addr;
7155 g_assert (!callvirt_this_arg);
7157 EMIT_GET_RGCTX (rgctx, context_used);
7158 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7159 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7161 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7164 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
/* valuetype path produced no 'alloc'; reload the temp as the result */
7168 if (alloc == NULL) {
7170 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7171 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
/*
 * CEE_CASTCLASS: checked downcast of the object on the stack top.
 * Three strategies: rgctx-based icall for shared generics, an inlined
 * castclass wrapper for MBR/interface types, or the fast handle_castclass.
 */
7185 token = read32 (ip + 1);
7186 klass = mini_get_class (method, token, generic_context);
7187 CHECK_TYPELOAD (klass);
7188 if (sp [0]->type != STACK_OBJ)
7191 if (cfg->generic_sharing_context)
7192 context_used = mono_class_check_context_used (klass);
/* shared generics: the target klass is only known at runtime via RGCTX */
7195 MonoInst *rgctx, *args [2];
7197 g_assert (!method->klass->valuetype);
7203 EMIT_GET_RGCTX (rgctx, context_used);
7204 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7205 MONO_RGCTX_INFO_KLASS);
7207 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
/* MBR/interface casts go through the marshal castclass wrapper */
7211 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7212 MonoMethod *mono_castclass;
7213 MonoInst *iargs [1];
7216 mono_castclass = mono_marshal_get_castclass (klass);
7219 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7220 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7221 g_assert (costs > 0);
7224 cfg->real_offset += 5;
7229 inline_costs += costs;
/* common case: emit the inline type check directly */
7232 ins = handle_castclass (cfg, klass, *sp);
/*
 * CEE_ISINST: non-throwing type test; mirrors the CASTCLASS strategies
 * (rgctx icall / inlined isinst wrapper / direct handle_isinst) but
 * yields NULL instead of raising on mismatch.
 */
7242 token = read32 (ip + 1);
7243 klass = mini_get_class (method, token, generic_context);
7244 CHECK_TYPELOAD (klass);
7245 if (sp [0]->type != STACK_OBJ)
7248 if (cfg->generic_sharing_context)
7249 context_used = mono_class_check_context_used (klass);
/* shared generics: resolve target klass through the RGCTX at runtime */
7252 MonoInst *rgctx, *args [2];
7258 EMIT_GET_RGCTX (rgctx, context_used);
7259 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7261 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
/* MBR/interface tests go through the marshal isinst wrapper */
7265 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7266 MonoMethod *mono_isinst;
7267 MonoInst *iargs [1];
7270 mono_isinst = mono_marshal_get_isinst (klass);
7273 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7274 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7275 g_assert (costs > 0);
7278 cfg->real_offset += 5;
7283 inline_costs += costs;
/* common case: inline type check */
7286 ins = handle_isinst (cfg, klass, *sp);
/*
 * CEE_UNBOX_ANY: for reference types this degenerates to a castclass;
 * for nullables it goes through the nullable-unbox helper; for plain
 * valuetypes it unboxes and loads the value.  Interior lines elided.
 */
7293 case CEE_UNBOX_ANY: {
7294 MonoInst *rgctx = NULL;
7299 token = read32 (ip + 1);
7300 klass = mini_get_class (method, token, generic_context);
7301 CHECK_TYPELOAD (klass);
7303 mono_save_token_info (cfg, image, token, klass);
7305 if (cfg->generic_sharing_context)
7306 context_used = mono_class_check_context_used (klass);
/* reference type: unbox.any == castclass (ECMA-335 III.4.33) */
7308 if (generic_class_is_reference_type (cfg, klass)) {
7311 MonoInst *iargs [2];
7313 g_assert (!method->klass->valuetype);
7318 EMIT_GET_RGCTX (rgctx, context_used);
7319 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7320 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7324 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7325 MonoMethod *mono_castclass;
7326 MonoInst *iargs [1];
7329 mono_castclass = mono_marshal_get_castclass (klass);
7332 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7333 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7335 g_assert (costs > 0);
7338 cfg->real_offset += 5;
7342 inline_costs += costs;
7344 ins = handle_castclass (cfg, klass, *sp);
/* valuetype path: unbox, then load the value from the unboxed address */
7353 EMIT_GET_RGCTX (rgctx, context_used);
7355 if (mono_class_is_nullable (klass)) {
7356 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7363 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7369 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
/*
 * CEE_BOX: box the valuetype on the stack top.  Includes a peephole for
 * the frequent "box; brtrue" pattern in generic code (a boxed non-nullable
 * valuetype is never null, so the branch is unconditional).
 */
7382 token = read32 (ip + 1);
7383 klass = mini_get_class (method, token, generic_context);
7384 CHECK_TYPELOAD (klass);
7386 mono_save_token_info (cfg, image, token, klass);
7388 if (cfg->generic_sharing_context)
7389 context_used = mono_class_check_context_used (klass);
/* boxing a reference type is a no-op */
7391 if (generic_class_is_reference_type (cfg, klass)) {
7397 if (klass == mono_defaults.void_class)
7399 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7401 /* frequent check in generic code: box (struct), brtrue */
7402 if (!mono_class_is_nullable (klass) &&
7403 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7404 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
/* replace box+brtrue with an unconditional branch to the target */
7406 MONO_INST_NEW (cfg, ins, OP_BR);
7407 if (*ip == CEE_BRTRUE_S) {
7410 target = ip + 1 + (signed char)(*ip);
7415 target = ip + 4 + (gint)(read32 (ip));
7418 GET_BBLOCK (cfg, tblock, target);
7419 link_bblock (cfg, bblock, tblock);
7420 CHECK_BBLOCK (target, ip, tblock);
7421 ins->inst_target_bb = tblock;
7422 GET_BBLOCK (cfg, tblock, ip);
7424 * This leads to some inconsistency, since the two bblocks are not
7425 * really connected, but it is needed for handling stack arguments
7426 * correct (See test_0_box_brtrue_opt_regress_81102).
7428 link_bblock (cfg, bblock, tblock);
7429 if (sp != stack_start) {
7430 handle_stack_args (cfg, stack_start, sp - stack_start);
7432 CHECK_UNVERIFIABLE (cfg);
7434 MONO_ADD_INS (bblock, ins);
7435 start_new_bblock = 1;
/* shared generics: box using klass/vtable fetched from the RGCTX */
7444 EMIT_GET_RGCTX (rgctx, context_used);
7445 if (cfg->opt & MONO_OPT_SHARED)
7446 rgctx_info = MONO_RGCTX_INFO_KLASS;
7448 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7449 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7450 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, rgctx, data);
7452 *sp++ = handle_box (cfg, val, klass);
/*
 * CEE_UNBOX: produce a managed pointer to the value inside a boxed
 * valuetype; nullables need a helper that materializes a temp first.
 */
7460 MonoInst *rgctx = NULL;
7465 token = read32 (ip + 1);
7466 klass = mini_get_class (method, token, generic_context);
7467 CHECK_TYPELOAD (klass);
7469 mono_save_token_info (cfg, image, token, klass);
7471 if (cfg->generic_sharing_context)
7472 context_used = mono_class_check_context_used (klass);
7475 EMIT_GET_RGCTX (rgctx, context_used);
7477 if (mono_class_is_nullable (klass)) {
/* nullable unbox yields a value; take the address of its backing var */
7480 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7481 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7485 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
/*
 * Instance field opcodes (CEE_STFLD / CEE_LDFLD / CEE_LDFLDA): resolve the
 * field, verify access, then emit either a direct memory access at the
 * field offset or a remoting wrapper call for MarshalByRef/contextbound
 * objects.  Interior lines are elided in this extract.
 */
7495 MonoClassField *field;
7499 if (*ip == CEE_STFLD) {
7506 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7508 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7511 token = read32 (ip + 1);
/* in wrappers the token is a direct pointer, not a metadata token */
7512 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7513 field = mono_method_get_wrapper_data (method, token);
7514 klass = field->parent;
7517 field = mono_field_from_token (image, token, &klass, generic_context);
7521 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7522 FIELD_ACCESS_FAILURE;
7523 mono_class_init (klass);
/* valuetype field offsets include the MonoObject header; strip it */
7525 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7526 if (*ip == CEE_STFLD) {
7527 if (target_type_is_incompatible (cfg, field->type, sp [1]))
/* MBR/contextbound stores must go through the remoting stfld wrapper */
7529 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7530 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7531 MonoInst *iargs [5];
7534 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7535 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7536 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7540 if (cfg->opt & MONO_OPT_INLINE) {
7541 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7542 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7543 g_assert (costs > 0);
7546 cfg->real_offset += 5;
7549 inline_costs += costs;
7552 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
/* plain store at object + field offset */
7557 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7559 store->flags |= ins_flag;
/* MBR/contextbound loads go through ldfld/ldflda remoting wrappers */
7566 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7567 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7568 MonoInst *iargs [4];
7571 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7572 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7573 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7574 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7575 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7576 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7578 g_assert (costs > 0);
7581 cfg->real_offset += 5;
7585 inline_costs += costs;
7588 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* ldfld on a vtype in a register: materialize a var to take its address */
7592 if (sp [0]->type == STACK_VTYPE) {
7595 /* Have to compute the address of the variable */
7597 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7599 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7601 g_assert (var->klass == klass);
7603 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7607 if (*ip == CEE_LDFLDA) {
7608 dreg = alloc_preg (cfg);
/* ldflda: just compute object + offset as a managed pointer */
7610 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7611 ins->klass = mono_class_from_mono_type (field->type);
7612 ins->type = STACK_MP;
7617 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7618 load->flags |= ins_flag;
/*
 * Static field opcodes, part 1: resolve the field from the token (or from
 * wrapper data), check access, and find out whether it is a special
 * static field (thread/context static) registered with the domain.
 */
7629 MonoClassField *field;
7630 gpointer addr = NULL;
7631 gboolean is_special_static;
7634 token = read32 (ip + 1);
7636 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7637 field = mono_method_get_wrapper_data (method, token);
7638 klass = field->parent;
7641 field = mono_field_from_token (image, token, &klass, generic_context);
7644 mono_class_init (klass);
7645 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7646 FIELD_ACCESS_FAILURE;
7649 * We can only support shared generic static
7650 * field access on architectures where the
7651 * trampoline code has been extended to handle
7652 * the generic class init.
7654 #ifndef MONO_ARCH_VTABLE_REG
7655 GENERIC_SHARING_FAILURE (*ip);
7658 if (cfg->generic_sharing_context)
7659 context_used = mono_class_check_context_used (klass);
/* literal fields are folded by the compiler and never reach here */
7661 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7663 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7664 * to be called here.
7666 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7667 mono_class_vtable (cfg->domain, klass);
7668 CHECK_TYPELOAD (klass);
/* the special-static table is domain state; guard with the domain lock */
7670 mono_domain_lock (cfg->domain);
7671 if (cfg->domain->special_static_fields)
7672 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7673 mono_domain_unlock (cfg->domain);
7675 is_special_static = mono_class_field_is_special_static (field);
7677 /* Generate IR to compute the field address */
/*
 * Static field opcodes, part 2: compute the field's address.  Paths, in
 * order: shared/AOT special statics via icall; generic-sharing via RGCTX
 * static data; shared/AOT regular; direct address with eager cctor run;
 * thread/context special statics via mono_get_special_static_data.
 */
7679 if ((cfg->opt & MONO_OPT_SHARED) ||
7680 (cfg->compile_aot && is_special_static) ||
7681 (context_used && is_special_static)) {
7682 MonoInst *iargs [2];
7684 g_assert (field->parent);
7685 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7689 EMIT_GET_RGCTX (rgctx, context_used);
7690 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7692 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7694 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7695 } else if (context_used) {
7696 MonoInst *rgctx, *static_data;
7699 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7700 method->klass->name_space, method->klass->name, method->name,
7701 depth, field->offset);
/* emit a generic class-init trampoline call if the cctor may still run */
7704 if (mono_class_needs_cctor_run (klass, method)) {
7706 MonoInst *vtable, *rgctx;
7708 EMIT_GET_RGCTX (rgctx, context_used);
7709 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7711 // FIXME: This doesn't work since it tries to pass the argument
7712 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7714 * The vtable pointer is always passed in a register regardless of
7715 * the calling convention, so assign it manually, and make a call
7716 * using a signature without parameters.
7718 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7719 #ifdef MONO_ARCH_VTABLE_REG
7720 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7727 * The pointer we're computing here is
7729 * super_info.static_data + field->offset
7731 EMIT_GET_RGCTX (rgctx, context_used);
7732 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7734 if (field->offset == 0) {
7737 int addr_reg = mono_alloc_preg (cfg);
7738 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7740 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7741 MonoInst *iargs [2];
7743 g_assert (field->parent);
7744 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7745 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7746 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7748 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7750 CHECK_TYPELOAD (klass);
7752 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7753 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7754 if (cfg->verbose_level > 2)
7755 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7756 class_inits = g_slist_prepend (class_inits, vtable);
7758 if (cfg->run_cctors) {
7760 /* This makes so that inline cannot trigger */
7761 /* .cctors: too many apps depend on them */
7762 /* running with a specific order... */
7763 if (! vtable->initialized)
/* run the cctor at compile time; abort compilation on a thrown exception */
7765 ex = mono_runtime_class_init_full (vtable, FALSE);
7767 set_exception_object (cfg, ex);
7768 goto exception_exit;
/* field address is a compile-time constant in the JIT case */
7772 addr = (char*)vtable->data + field->offset;
7774 if (cfg->compile_aot)
7775 EMIT_NEW_SFLDACONST (cfg, ins, field);
7777 EMIT_NEW_PCONST (cfg, ins, addr);
7780 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7781 * This could be later optimized to do just a couple of
7782 * memory dereferences with constant offsets.
7784 MonoInst *iargs [1];
7785 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7786 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7790 /* Generate IR to do the actual load/store operation */
/*
 * Static field opcodes, part 3: with the address in 'ins', perform the
 * operation.  LDSFLDA returns the address; STSFLD stores; LDSFLD loads,
 * constant-folding initonly fields of already-initialized classes.
 */
7792 if (*ip == CEE_LDSFLDA) {
7793 ins->klass = mono_class_from_mono_type (field->type);
7795 } else if (*ip == CEE_STSFLD) {
7800 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7801 store->flags |= ins_flag;
7804 gboolean is_const = FALSE;
7805 MonoVTable *vtable = NULL;
7807 if (!context_used) {
7808 vtable = mono_class_vtable (cfg->domain, klass);
7809 CHECK_TYPELOAD (klass);
/* readonly (initonly) field of an initialized class: fold the current
 * value into a constant at JIT time */
7811 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7812 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7813 gpointer addr = (char*)vtable->data + field->offset;
7814 int ro_type = field->type->type;
7815 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7816 ro_type = field->type->data.klass->enum_basetype->type;
7818 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7821 case MONO_TYPE_BOOLEAN:
7823 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7827 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7830 case MONO_TYPE_CHAR:
7832 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7836 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7841 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7845 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7850 case MONO_TYPE_STRING:
7851 case MONO_TYPE_OBJECT:
7852 case MONO_TYPE_CLASS:
7853 case MONO_TYPE_SZARRAY:
7855 case MONO_TYPE_FNPTR:
7856 case MONO_TYPE_ARRAY:
7857 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7858 type_to_eval_stack_type ((cfg), field->type, *sp);
7863 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
/* valuetypes (and anything not folded above) fall back to a load */
7868 case MONO_TYPE_VALUETYPE:
7878 CHECK_STACK_OVF (1);
7880 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7881 load->flags |= ins_flag;
/* CEE_STOBJ: store the value sp[1] through the address sp[0], typed by
 * the token's class. */
7893 token = read32 (ip + 1);
7894 klass = mini_get_class (method, token, generic_context);
7895 CHECK_TYPELOAD (klass);
7896 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7897 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
/*
 * CEE_NEWARR: allocate a one-dimensional array.  Picks rgctx-based icall,
 * shared icall, or a decompose-later OP_NEWARR; also open-codes the common
 * "newarr; ...; InitializeArray" sequence as a memcpy from the RVA blob.
 */
7908 const char *data_ptr;
7915 token = read32 (ip + 1);
7917 klass = mini_get_class (method, token, generic_context);
7918 CHECK_TYPELOAD (klass);
7920 if (cfg->generic_sharing_context)
7921 context_used = mono_class_check_context_used (klass);
7927 /* FIXME: Decompose later to help abcrem */
/* shared generics: get the array vtable through the RGCTX */
7930 EMIT_GET_RGCTX (rgctx, context_used);
7931 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7936 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7938 if (cfg->opt & MONO_OPT_SHARED) {
7939 /* Decompose now to avoid problems with references to the domainvar */
7940 MonoInst *iargs [3];
7942 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7943 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7946 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7948 /* Decompose later since it is needed by abcrem */
7949 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7950 ins->dreg = alloc_preg (cfg);
7951 ins->sreg1 = sp [0]->dreg;
7952 ins->inst_newa_class = klass;
7953 ins->type = STACK_OBJ;
7955 MONO_ADD_INS (cfg->cbb, ins);
7956 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7957 cfg->cbb->has_array_access = TRUE;
7959 /* Needed so mono_emit_load_get_addr () gets called */
7960 mono_get_got_var (cfg);
7970 * we inline/optimize the initialization sequence if possible.
7971 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7972 * for small sizes open code the memcpy
7973 * ensure the rva field is big enough
/* constant-length array immediately initialized from metadata: replace
 * the RuntimeHelpers.InitializeArray call with a direct memcpy */
7975 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7976 MonoMethod *memcpy_method = get_memcpy_method ();
7977 MonoInst *iargs [3];
7978 int add_reg = alloc_preg (cfg);
7980 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7981 if (cfg->compile_aot) {
7982 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7984 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7986 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7987 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* CEE_LDLEN: push the array length; decomposed later (kept as OP_LDLEN
 * so array-bounds-check removal can see it). */
7996 if (sp [0]->type != STACK_OBJ)
7999 dreg = alloc_preg (cfg);
8000 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8001 ins->dreg = alloc_preg (cfg);
8002 ins->sreg1 = sp [0]->dreg;
8003 ins->type = STACK_I4;
8004 MONO_ADD_INS (cfg->cbb, ins);
8005 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8006 cfg->cbb->has_array_access = TRUE;
/* CEE_LDELEMA: push the address of an array element, with an exact array
 * type check for reference-element arrays (skipped in wrappers). */
8014 if (sp [0]->type != STACK_OBJ)
8017 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8019 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8020 CHECK_TYPELOAD (klass);
8021 /* we need to make sure that this array is exactly the type it needs
8022 * to be for correctness. the wrappers are lax with their usage
8023 * so we need to ignore them here
8025 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8026 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8029 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
/*
 * CEE_LDELEM_* family: load an array element.  Constant indices get a
 * bounds check plus a direct offset load; variable indices go through
 * the generic ldelema helper first.
 */
8033 case CEE_LDELEM_ANY:
8044 case CEE_LDELEM_REF: {
8050 if (*ip == CEE_LDELEM_ANY) {
8052 token = read32 (ip + 1);
8053 klass = mini_get_class (method, token, generic_context);
8054 CHECK_TYPELOAD (klass);
8055 mono_class_init (klass);
/* the typed variants encode the element class in the opcode itself */
8058 klass = array_access_to_klass (*ip);
8060 if (sp [0]->type != STACK_OBJ)
8063 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8065 if (sp [1]->opcode == OP_ICONST) {
8066 int array_reg = sp [0]->dreg;
8067 int index_reg = sp [1]->dreg;
8068 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8070 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8071 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8073 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8074 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8077 if (*ip == CEE_LDELEM_ANY)
/*
 * CEE_STELEM_* family: store an array element.  Reference-type stores
 * (except storing a literal NULL) must go through the stelemref marshal
 * helper for the covariance check; value stores mirror CEE_LDELEM_*.
 */
8090 case CEE_STELEM_REF:
8091 case CEE_STELEM_ANY: {
8097 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8099 if (*ip == CEE_STELEM_ANY) {
8101 token = read32 (ip + 1);
8102 klass = mini_get_class (method, token, generic_context);
8103 CHECK_TYPELOAD (klass);
8104 mono_class_init (klass);
8107 klass = array_access_to_klass (*ip);
8109 if (sp [0]->type != STACK_OBJ)
8112 /* storing a NULL doesn't need any of the complex checks in stelemref */
8113 if (generic_class_is_reference_type (cfg, klass) &&
8114 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8115 MonoMethod* helper = mono_marshal_get_stelemref ();
8116 MonoInst *iargs [3];
8118 if (sp [0]->type != STACK_OBJ)
8120 if (sp [2]->type != STACK_OBJ)
8127 mono_emit_method_call (cfg, helper, iargs, NULL);
/* constant index: bounds check + direct offset store */
8129 if (sp [1]->opcode == OP_ICONST) {
8130 int array_reg = sp [0]->dreg;
8131 int index_reg = sp [1]->dreg;
8132 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8134 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8135 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8137 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8138 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8142 if (*ip == CEE_STELEM_ANY)
/* CEE_CKFINITE: emit OP_CKFINITE, which checks the R8 on the stack top
 * for NaN/infinity; decomposed immediately for the backend. */
8149 case CEE_CKFINITE: {
8153 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8154 ins->sreg1 = sp [0]->dreg;
8155 ins->dreg = alloc_freg (cfg);
8156 ins->type = STACK_R8;
8157 MONO_ADD_INS (bblock, ins);
8160 mono_decompose_opcode (cfg, ins);
/*
 * CEE_REFANYVAL: extract the value address from a TypedReference after
 * verifying its klass field matches the token's class (runtime check via
 * RGCTX under generic sharing, inline class check otherwise).
 */
8165 case CEE_REFANYVAL: {
8166 MonoInst *src_var, *src;
8168 int klass_reg = alloc_preg (cfg);
8169 int dreg = alloc_preg (cfg);
8172 MONO_INST_NEW (cfg, ins, *ip);
8175 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8176 CHECK_TYPELOAD (klass);
8177 mono_class_init (klass);
8179 if (cfg->generic_sharing_context)
8180 context_used = mono_class_check_context_used (klass);
/* get the address of the TypedRef vtype on the stack */
8183 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8185 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8186 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8190 MonoInst *rgctx, *klass_ins;
8192 EMIT_GET_RGCTX (rgctx, context_used);
8193 klass_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8196 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8197 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8199 mini_emit_class_check (cfg, klass_reg, klass);
8201 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8202 ins->type = STACK_MP;
/*
 * CEE_MKREFANY: build a TypedReference in a stack temp, filling in its
 * klass, type and value fields; the klass/type come from the RGCTX under
 * generic sharing, from patchable constants under AOT, or are immediates.
 */
8207 case CEE_MKREFANY: {
8208 MonoInst *loc, *addr;
8211 MONO_INST_NEW (cfg, ins, *ip);
8214 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8215 CHECK_TYPELOAD (klass);
8216 mono_class_init (klass);
8218 if (cfg->generic_sharing_context)
8219 context_used = mono_class_check_context_used (klass);
8221 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8222 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8225 MonoInst *rgctx, *const_ins;
8226 int type_reg = alloc_preg (cfg);
8228 EMIT_GET_RGCTX (rgctx, context_used);
8229 const_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
/* MonoType* is derived from MonoClass* by adding the byval_arg offset */
8231 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8232 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8233 } else if (cfg->compile_aot) {
8234 int const_reg = alloc_preg (cfg);
8235 int type_reg = alloc_preg (cfg);
8237 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8238 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8239 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8242 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8243 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8245 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
/* result: the TypedReference vtype itself */
8247 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8248 ins->type = STACK_VTYPE;
8249 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN, part 1: resolve the token into a runtime handle plus its
 * handle class (type/field/method handle), and work out how much generic
 * context the handle needs under generic sharing.
 */
8256 MonoClass *handle_class;
8258 CHECK_STACK_OVF (1);
8261 n = read32 (ip + 1);
/* dynamic-method wrappers store the handle directly in wrapper data */
8263 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8264 handle = mono_method_get_wrapper_data (method, n);
8265 handle_class = mono_method_get_wrapper_data (method, n + 1);
8266 if (handle_class == mono_defaults.typehandle_class)
8267 handle = &((MonoClass*)handle)->byval_arg;
8270 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8274 mono_class_init (handle_class);
8275 if (cfg->generic_sharing_context) {
8276 if (handle_class == mono_defaults.typehandle_class) {
8277 /* If we get a MONO_TYPE_CLASS
8278 then we need to provide the
8280 instantiation of it. */
8281 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8284 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8285 } else if (handle_class == mono_defaults.fieldhandle_class)
8286 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8287 else if (handle_class == mono_defaults.methodhandle_class)
8288 context_used = mono_method_check_context_used (handle);
8290 g_assert_not_reached ();
/*
 * CEE_LDTOKEN, part 2: MONO_OPT_SHARED path resolves the token through a
 * runtime icall; also contains the fast path that fuses
 * "ldtoken; call Type.GetTypeFromHandle" into a direct Type constant.
 */
8293 if (cfg->opt & MONO_OPT_SHARED) {
8294 MonoInst *addr, *vtvar, *iargs [3];
8295 int method_context_used;
8297 if (cfg->generic_sharing_context)
8298 method_context_used = mono_method_check_context_used (method);
8300 method_context_used = 0;
/* the handle is returned as a vtype; spill it into a temp */
8302 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8304 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8305 EMIT_NEW_ICONST (cfg, iargs [1], n);
8306 if (method_context_used) {
8309 EMIT_GET_RGCTX (rgctx, method_context_used);
8310 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8311 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8313 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8314 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8316 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8320 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* peephole: ldtoken immediately consumed by GetTypeFromHandle */
8322 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8323 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8324 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8325 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8326 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8327 MonoClass *tclass = mono_class_from_mono_type (handle);
8329 mono_class_init (tclass);
8333 EMIT_GET_RGCTX (rgctx, context_used);
8334 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8335 } else if (cfg->compile_aot) {
8337 * FIXME: We would have to include the context into the
8338 * aot constant too (tests/generic-array-type.2.exe).
8340 if (generic_context)
8341 cfg->disable_aot = TRUE;
8342 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8344 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8346 ins->type = STACK_OBJ;
8347 ins->klass = cmethod->klass;
/*
 * CEE_LDTOKEN, part 3: normal path — store the handle (from the RGCTX
 * under generic sharing, an AOT-patchable constant, or an immediate)
 * into a temp of the handle class and load it back as the result.
 */
8350 MonoInst *addr, *vtvar;
8352 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8357 EMIT_GET_RGCTX (rgctx, context_used);
8358 if (handle_class == mono_defaults.typehandle_class) {
8359 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8360 mono_class_from_mono_type (handle),
8361 MONO_RGCTX_INFO_TYPE);
8362 } else if (handle_class == mono_defaults.methodhandle_class) {
8363 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8364 handle, MONO_RGCTX_INFO_METHOD);
8365 } else if (handle_class == mono_defaults.fieldhandle_class) {
8366 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8367 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8369 g_assert_not_reached ();
8371 } else if (cfg->compile_aot) {
8372 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8374 EMIT_NEW_PCONST (cfg, ins, handle);
8376 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8377 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8378 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/*
 * CEE_THROW / CEE_ENDFINALLY / CEE_LEAVE handling.  THROW and ENDFINALLY
 * terminate the basic block; LEAVE empties the stack and, when leaving a
 * catch handler, rethrows any pending thread-abort exception.
 * NOTE(review): this region is cut off at the end of this extract.
 */
8388 MONO_INST_NEW (cfg, ins, OP_THROW);
8390 ins->sreg1 = sp [0]->dreg;
8392 bblock->out_of_line = TRUE;
8393 MONO_ADD_INS (bblock, ins);
8394 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8395 MONO_ADD_INS (bblock, ins);
8398 link_bblock (cfg, bblock, end_bblock);
8399 start_new_bblock = 1;
8401 case CEE_ENDFINALLY:
8402 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8403 MONO_ADD_INS (bblock, ins);
8405 start_new_bblock = 1;
8408 * Control will leave the method so empty the stack, otherwise
8409 * the next basic block will start with a nonempty stack.
8411 while (sp != stack_start) {
/* LEAVE / LEAVE_S: compute the 32-bit or 8-bit branch target */
8419 if (*ip == CEE_LEAVE) {
8421 target = ip + 5 + (gint32)read32(ip + 1);
8424 target = ip + 2 + (signed char)(ip [1]);
8427 /* empty the stack */
8428 while (sp != stack_start) {
8433 * If this leave statement is in a catch block, check for a
8434 * pending exception, and rethrow it if necessary.
8436 for (i = 0; i < header->num_clauses; ++i) {
8437 MonoExceptionClause *clause = &header->clauses [i];
8440 * Use <= in the final comparison to handle clauses with multiple
8441 * leave statements, like in bug #78024.
8442 * The ordering of the exception clauses guarantees that we find the
8445 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8447 MonoBasicBlock *dont_throw;
8452 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
/* fetch a pending ThreadAbortException (if any) for this thread */
8455 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8457 NEW_BBLOCK (cfg, dont_throw);
8460 * Currently, we allways rethrow the abort exception, despite the
8461 * fact that this is not correct. See thread6.cs for an example.
8462 * But propagating the abort exception is more important than
8463 * getting the sematics right.
8465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8466 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8467 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8469 MONO_START_BB (cfg, dont_throw);
8474 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8476 for (tmp = handlers; tmp; tmp = tmp->next) {
8478 link_bblock (cfg, bblock, tblock);
8479 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8480 ins->inst_target_bb = tblock;
8481 MONO_ADD_INS (bblock, ins);
8483 g_list_free (handlers);
8486 MONO_INST_NEW (cfg, ins, OP_BR);
8487 MONO_ADD_INS (bblock, ins);
8488 GET_BBLOCK (cfg, tblock, target);
8489 link_bblock (cfg, bblock, tblock);
8490 CHECK_BBLOCK (target, ip, tblock);
8491 ins->inst_target_bb = tblock;
8492 start_new_bblock = 1;
8494 if (*ip == CEE_LEAVE)
8503 * Mono specific opcodes
8505 case MONO_CUSTOM_PREFIX: {
8507 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8511 case CEE_MONO_ICALL: {
8513 MonoJitICallInfo *info;
8515 token = read32 (ip + 2);
8516 func = mono_method_get_wrapper_data (method, token);
8517 info = mono_find_jit_icall_by_addr (func);
8520 CHECK_STACK (info->sig->param_count);
8521 sp -= info->sig->param_count;
8523 ins = mono_emit_jit_icall (cfg, info->func, sp);
8524 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8528 inline_costs += 10 * num_calls++;
8532 case CEE_MONO_LDPTR: {
8535 CHECK_STACK_OVF (1);
8537 token = read32 (ip + 2);
8539 ptr = mono_method_get_wrapper_data (method, token);
8540 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8541 MonoJitICallInfo *callinfo;
8542 const char *icall_name;
8544 icall_name = method->name + strlen ("__icall_wrapper_");
8545 g_assert (icall_name);
8546 callinfo = mono_find_jit_icall_by_name (icall_name);
8547 g_assert (callinfo);
8549 if (ptr == callinfo->func) {
8550 /* Will be transformed into an AOTCONST later */
8551 EMIT_NEW_PCONST (cfg, ins, ptr);
8557 /* FIXME: Generalize this */
8558 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8559 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8564 EMIT_NEW_PCONST (cfg, ins, ptr);
8567 inline_costs += 10 * num_calls++;
8568 /* Can't embed random pointers into AOT code */
8569 cfg->disable_aot = 1;
8572 case CEE_MONO_ICALL_ADDR: {
8573 MonoMethod *cmethod;
8576 CHECK_STACK_OVF (1);
8578 token = read32 (ip + 2);
8580 cmethod = mono_method_get_wrapper_data (method, token);
8582 if (cfg->compile_aot) {
8583 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8585 ptr = mono_lookup_internal_call (cmethod);
8587 EMIT_NEW_PCONST (cfg, ins, ptr);
8593 case CEE_MONO_VTADDR: {
8594 MonoInst *src_var, *src;
8600 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8601 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8606 case CEE_MONO_NEWOBJ: {
8607 MonoInst *iargs [2];
8609 CHECK_STACK_OVF (1);
8611 token = read32 (ip + 2);
8612 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8613 mono_class_init (klass);
8614 NEW_DOMAINCONST (cfg, iargs [0]);
8615 MONO_ADD_INS (cfg->cbb, iargs [0]);
8616 NEW_CLASSCONST (cfg, iargs [1], klass);
8617 MONO_ADD_INS (cfg->cbb, iargs [1]);
8618 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8620 inline_costs += 10 * num_calls++;
8623 case CEE_MONO_OBJADDR:
8626 MONO_INST_NEW (cfg, ins, OP_MOVE);
8627 ins->dreg = alloc_preg (cfg);
8628 ins->sreg1 = sp [0]->dreg;
8629 ins->type = STACK_MP;
8630 MONO_ADD_INS (cfg->cbb, ins);
8634 case CEE_MONO_LDNATIVEOBJ:
8636 * Similar to LDOBJ, but instead load the unmanaged
8637 * representation of the vtype to the stack.
8642 token = read32 (ip + 2);
8643 klass = mono_method_get_wrapper_data (method, token);
8644 g_assert (klass->valuetype);
8645 mono_class_init (klass);
8648 MonoInst *src, *dest, *temp;
8651 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8652 temp->backend.is_pinvoke = 1;
8653 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8654 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8656 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8657 dest->type = STACK_VTYPE;
8658 dest->klass = klass;
8664 case CEE_MONO_RETOBJ: {
8666 * Same as RET, but return the native representation of a vtype
8669 g_assert (cfg->ret);
8670 g_assert (mono_method_signature (method)->pinvoke);
8675 token = read32 (ip + 2);
8676 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8678 if (!cfg->vret_addr) {
8679 g_assert (cfg->ret_var_is_local);
8681 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8683 EMIT_NEW_RETLOADA (cfg, ins);
8685 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8687 if (sp != stack_start)
8690 MONO_INST_NEW (cfg, ins, OP_BR);
8691 ins->inst_target_bb = end_bblock;
8692 MONO_ADD_INS (bblock, ins);
8693 link_bblock (cfg, bblock, end_bblock);
8694 start_new_bblock = 1;
8698 case CEE_MONO_CISINST:
8699 case CEE_MONO_CCASTCLASS: {
8704 token = read32 (ip + 2);
8705 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8706 if (ip [1] == CEE_MONO_CISINST)
8707 ins = handle_cisinst (cfg, klass, sp [0]);
8709 ins = handle_ccastclass (cfg, klass, sp [0]);
8715 case CEE_MONO_SAVE_LMF:
8716 case CEE_MONO_RESTORE_LMF:
8717 #ifdef MONO_ARCH_HAVE_LMF_OPS
8718 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8719 MONO_ADD_INS (bblock, ins);
8720 cfg->need_lmf_area = TRUE;
8724 case CEE_MONO_CLASSCONST:
8725 CHECK_STACK_OVF (1);
8727 token = read32 (ip + 2);
8728 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8731 inline_costs += 10 * num_calls++;
8733 case CEE_MONO_NOT_TAKEN:
8734 bblock->out_of_line = TRUE;
8738 CHECK_STACK_OVF (1);
8740 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8741 ins->dreg = alloc_preg (cfg);
8742 ins->inst_offset = (gint32)read32 (ip + 2);
8743 ins->type = STACK_PTR;
8744 MONO_ADD_INS (bblock, ins);
8749 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8759 /* somewhat similar to LDTOKEN */
8760 MonoInst *addr, *vtvar;
8761 CHECK_STACK_OVF (1);
8762 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8764 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8765 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8767 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8768 ins->type = STACK_VTYPE;
8769 ins->klass = mono_defaults.argumenthandle_class;
8782 * The following transforms:
8783 * CEE_CEQ into OP_CEQ
8784 * CEE_CGT into OP_CGT
8785 * CEE_CGT_UN into OP_CGT_UN
8786 * CEE_CLT into OP_CLT
8787 * CEE_CLT_UN into OP_CLT_UN
8789 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8791 MONO_INST_NEW (cfg, ins, cmp->opcode);
8793 cmp->sreg1 = sp [0]->dreg;
8794 cmp->sreg2 = sp [1]->dreg;
8795 type_from_op (cmp, sp [0], sp [1]);
8797 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8798 cmp->opcode = OP_LCOMPARE;
8799 else if (sp [0]->type == STACK_R8)
8800 cmp->opcode = OP_FCOMPARE;
8802 cmp->opcode = OP_ICOMPARE;
8803 MONO_ADD_INS (bblock, cmp);
8804 ins->type = STACK_I4;
8805 ins->dreg = alloc_dreg (cfg, ins->type);
8806 type_from_op (ins, sp [0], sp [1]);
8808 if (cmp->opcode == OP_FCOMPARE) {
8810 * The backends expect the fceq opcodes to do the
8813 cmp->opcode = OP_NOP;
8814 ins->sreg1 = cmp->sreg1;
8815 ins->sreg2 = cmp->sreg2;
8817 MONO_ADD_INS (bblock, ins);
8824 MonoMethod *cil_method, *ctor_method;
8825 gboolean is_shared = FALSE;
8827 CHECK_STACK_OVF (1);
8829 n = read32 (ip + 2);
8830 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8833 mono_class_init (cmethod->klass);
8835 mono_save_token_info (cfg, image, n, cmethod);
8837 if (cfg->generic_sharing_context)
8838 context_used = mono_method_check_context_used (cmethod);
8840 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8841 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8842 (cmethod->klass->generic_class ||
8843 cmethod->klass->generic_container)) {
8846 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8850 cil_method = cmethod;
8851 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8852 METHOD_ACCESS_FAILURE;
8854 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8855 if (check_linkdemand (cfg, method, cmethod))
8857 CHECK_CFG_EXCEPTION;
8858 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8859 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8863 * Optimize the common case of ldftn+delegate creation
8865 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8866 /* FIXME: SGEN support */
8867 /* FIXME: handle shared static generic methods */
8868 /* FIXME: handle this in shared code */
8869 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8870 MonoInst *target_ins;
8873 if (cfg->verbose_level > 3)
8874 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8875 target_ins = sp [-1];
8877 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8888 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8890 EMIT_GET_RGCTX (rgctx, context_used);
8891 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8892 } else if (is_shared) {
8893 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8895 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8897 if (method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED)
8898 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8900 ins = mono_emit_jit_icall (cfg, mono_ldftn_nosync, &argconst);
8904 inline_costs += 10 * num_calls++;
8907 case CEE_LDVIRTFTN: {
8912 n = read32 (ip + 2);
8913 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8916 mono_class_init (cmethod->klass);
8918 if (cfg->generic_sharing_context)
8919 context_used = mono_method_check_context_used (cmethod);
8921 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8922 if (check_linkdemand (cfg, method, cmethod))
8924 CHECK_CFG_EXCEPTION;
8925 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8926 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8935 EMIT_GET_RGCTX (rgctx, context_used);
8936 args [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8937 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8939 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8940 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8944 inline_costs += 10 * num_calls++;
8948 CHECK_STACK_OVF (1);
8950 n = read16 (ip + 2);
8952 EMIT_NEW_ARGLOAD (cfg, ins, n);
8957 CHECK_STACK_OVF (1);
8959 n = read16 (ip + 2);
8961 NEW_ARGLOADA (cfg, ins, n);
8962 MONO_ADD_INS (cfg->cbb, ins);
8970 n = read16 (ip + 2);
8972 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8974 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8978 CHECK_STACK_OVF (1);
8980 n = read16 (ip + 2);
8982 EMIT_NEW_LOCLOAD (cfg, ins, n);
8987 CHECK_STACK_OVF (1);
8989 n = read16 (ip + 2);
8991 EMIT_NEW_LOCLOADA (cfg, ins, n);
8999 n = read16 (ip + 2);
9001 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9003 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
9010 if (sp != stack_start)
9012 if (cfg->method != method)
9014 * Inlining this into a loop in a parent could lead to
9015 * stack overflows which is different behavior than the
9016 * non-inlined case, thus disable inlining in this case.
9018 goto inline_failure;
9020 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9021 ins->dreg = alloc_preg (cfg);
9022 ins->sreg1 = sp [0]->dreg;
9023 ins->type = STACK_PTR;
9024 MONO_ADD_INS (cfg->cbb, ins);
9026 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9027 if (header->init_locals)
9028 ins->flags |= MONO_INST_INIT;
9033 case CEE_ENDFILTER: {
9034 MonoExceptionClause *clause, *nearest;
9035 int cc, nearest_num;
9039 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9041 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9042 ins->sreg1 = (*sp)->dreg;
9043 MONO_ADD_INS (bblock, ins);
9044 start_new_bblock = 1;
9049 for (cc = 0; cc < header->num_clauses; ++cc) {
9050 clause = &header->clauses [cc];
9051 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9052 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9053 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9059 if ((ip - header->code) != nearest->handler_offset)
9064 case CEE_UNALIGNED_:
9065 ins_flag |= MONO_INST_UNALIGNED;
9066 /* FIXME: record alignment? we can assume 1 for now */
9071 ins_flag |= MONO_INST_VOLATILE;
9075 ins_flag |= MONO_INST_TAILCALL;
9076 cfg->flags |= MONO_CFG_HAS_TAIL;
9077 /* Can't inline tail calls at this time */
9078 inline_costs += 100000;
9085 token = read32 (ip + 2);
9086 klass = mini_get_class (method, token, generic_context);
9087 CHECK_TYPELOAD (klass);
9088 if (generic_class_is_reference_type (cfg, klass)) {
9089 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9091 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9092 mini_emit_initobj (cfg, *sp, NULL, klass);
9097 case CEE_CONSTRAINED_:
9099 token = read32 (ip + 2);
9100 constrained_call = mono_class_get_full (image, token, generic_context);
9101 CHECK_TYPELOAD (constrained_call);
9106 MonoInst *iargs [3];
9110 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9111 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9112 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9113 /* emit_memset only works when val == 0 */
9114 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9119 if (ip [1] == CEE_CPBLK) {
9120 MonoMethod *memcpy_method = get_memcpy_method ();
9121 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9123 MonoMethod *memset_method = get_memset_method ();
9124 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9134 ins_flag |= MONO_INST_NOTYPECHECK;
9136 ins_flag |= MONO_INST_NORANGECHECK;
9137 /* we ignore the no-nullcheck for now since we
9138 * really do it explicitly only when doing callvirt->call
9144 int handler_offset = -1;
9146 for (i = 0; i < header->num_clauses; ++i) {
9147 MonoExceptionClause *clause = &header->clauses [i];
9148 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY))
9149 handler_offset = clause->handler_offset;
9152 bblock->flags |= BB_EXCEPTION_UNSAFE;
9154 g_assert (handler_offset != -1);
9156 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9157 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9158 ins->sreg1 = load->dreg;
9159 MONO_ADD_INS (bblock, ins);
9161 link_bblock (cfg, bblock, end_bblock);
9162 start_new_bblock = 1;
9170 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9172 CHECK_STACK_OVF (1);
9174 token = read32 (ip + 2);
9175 /* FIXXME: handle generics. */
9176 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9177 MonoType *type = mono_type_create_from_typespec (image, token);
9178 token = mono_type_size (type, &ialign);
9180 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9181 CHECK_TYPELOAD (klass);
9182 mono_class_init (klass);
9183 token = mono_class_value_size (klass, &align);
9185 EMIT_NEW_ICONST (cfg, ins, token);
9190 case CEE_REFANYTYPE: {
9191 MonoInst *src_var, *src;
9197 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9199 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9200 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9201 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9211 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9216 g_error ("opcode 0x%02x not handled", *ip);
9219 if (start_new_bblock != 1)
9222 bblock->cil_length = ip - bblock->cil_code;
9223 bblock->next_bb = end_bblock;
9225 if (cfg->method == method && cfg->domainvar) {
9227 MonoInst *get_domain;
9229 cfg->cbb = init_localsbb;
9231 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9232 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9235 get_domain->dreg = alloc_preg (cfg);
9236 MONO_ADD_INS (cfg->cbb, get_domain);
9238 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9239 MONO_ADD_INS (cfg->cbb, store);
9242 if (cfg->method == method && cfg->got_var)
9243 mono_emit_load_got_addr (cfg);
9245 if (header->init_locals) {
9248 cfg->cbb = init_localsbb;
9249 cfg->ip = header->code;
9250 for (i = 0; i < header->num_locals; ++i) {
9251 MonoType *ptype = header->locals [i];
9252 int t = ptype->type;
9253 dreg = cfg->locals [i]->dreg;
9255 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9256 t = ptype->data.klass->enum_basetype->type;
9258 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9259 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9260 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9261 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9262 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9263 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9264 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9265 ins->type = STACK_R8;
9266 ins->inst_p0 = (void*)&r8_0;
9267 ins->dreg = alloc_dreg (cfg, STACK_R8);
9268 MONO_ADD_INS (init_localsbb, ins);
9269 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9270 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9271 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9272 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9274 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9281 /* resolve backward branches in the middle of an existing basic block */
9282 for (tmp = bb_recheck; tmp; tmp = tmp->next) {
9284 /*printf ("need recheck in %s at IL_%04x\n", method->name, bblock->cil_code - header->code);*/
9285 tblock = find_previous (cfg->cil_offset_to_bb, header->code_size, start_bblock, bblock->cil_code);
9286 if (tblock != start_bblock) {
9288 split_bblock (cfg, tblock, bblock);
9289 l = bblock->cil_code - header->code;
9290 bblock->cil_length = tblock->cil_length - l;
9291 tblock->cil_length = l;
9293 printf ("recheck failed.\n");
9297 if (cfg->method == method) {
9299 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9300 bb->region = mono_find_block_region (cfg, bb->real_offset);
9302 mono_create_spvar_for_region (cfg, bb->region);
9303 if (cfg->verbose_level > 2)
9304 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9308 g_slist_free (class_inits);
9309 dont_inline = g_list_remove (dont_inline, method);
9311 if (inline_costs < 0) {
9314 /* Method is too large */
9315 mname = mono_method_full_name (method, TRUE);
9316 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9317 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9322 if ((cfg->verbose_level > 1) && (cfg->method == method))
9323 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9325 return inline_costs;
9328 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9329 g_slist_free (class_inits);
9330 dont_inline = g_list_remove (dont_inline, method);
9334 g_slist_free (class_inits);
9335 dont_inline = g_list_remove (dont_inline, method);
9339 g_slist_free (class_inits);
9340 dont_inline = g_list_remove (dont_inline, method);
9341 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9345 g_slist_free (class_inits);
9346 dont_inline = g_list_remove (dont_inline, method);
9347 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the ..._MEMBASE_IMM variant of a ..._MEMBASE_REG store opcode,
 * i.e. the form which stores an immediate value instead of the contents of
 * a register. Aborts on any store opcode which has no immediate form.
 */
9352 store_membase_reg_to_store_membase_imm (int opcode)
9355 case OP_STORE_MEMBASE_REG:
9356 return OP_STORE_MEMBASE_IMM;
9357 case OP_STOREI1_MEMBASE_REG:
9358 return OP_STOREI1_MEMBASE_IMM;
9359 case OP_STOREI2_MEMBASE_REG:
9360 return OP_STOREI2_MEMBASE_IMM;
9361 case OP_STOREI4_MEMBASE_REG:
9362 return OP_STOREI4_MEMBASE_IMM;
9363 case OP_STOREI8_MEMBASE_REG:
9364 return OP_STOREI8_MEMBASE_IMM;
/* Anything else has no immediate counterpart */
9366 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an IR opcode whose second source is a register to the equivalent
 * opcode taking an immediate operand (e.g. OP_ICOMPARE -> OP_ICOMPARE_IMM).
 * Used by the local peephole/constant-folding passes to fold OP_ICONST
 * operands directly into the consuming instruction.
 */
9373 mono_op_to_op_imm (int opcode)
9383 return OP_IDIV_UN_IMM;
9387 return OP_IREM_UN_IMM;
9401 return OP_ISHR_UN_IMM;
9418 return OP_LSHR_UN_IMM;
9421 return OP_COMPARE_IMM;
9423 return OP_ICOMPARE_IMM;
9425 return OP_LCOMPARE_IMM;
9427 case OP_STORE_MEMBASE_REG:
9428 return OP_STORE_MEMBASE_IMM;
9429 case OP_STOREI1_MEMBASE_REG:
9430 return OP_STOREI1_MEMBASE_IMM;
9431 case OP_STOREI2_MEMBASE_REG:
9432 return OP_STOREI2_MEMBASE_IMM;
9433 case OP_STOREI4_MEMBASE_REG:
9434 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms (x86 family only) */
9436 #if defined(__i386__) || defined (__x86_64__)
9438 return OP_X86_PUSH_IMM;
9439 case OP_X86_COMPARE_MEMBASE_REG:
9440 return OP_X86_COMPARE_MEMBASE_IMM;
9442 #if defined(__x86_64__)
9443 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9444 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9446 case OP_VOIDCALL_REG:
9455 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CIL ldind-style indirect-load opcode into the corresponding
 * typed OP_LOAD*_MEMBASE IR opcode (sign/zero extension and operand size
 * encoded in the opcode). Aborts for opcodes with no mapping.
 * NOTE(review): the case labels are elided in this fragment; the mapping is
 * inferred from the function name and the return values — confirm against
 * the full source.
 */
9462 ldind_to_load_membase (int opcode)
9466 return OP_LOADI1_MEMBASE;
9468 return OP_LOADU1_MEMBASE;
9470 return OP_LOADI2_MEMBASE;
9472 return OP_LOADU2_MEMBASE;
9474 return OP_LOADI4_MEMBASE;
9476 return OP_LOADU4_MEMBASE;
9478 return OP_LOAD_MEMBASE;
9480 return OP_LOAD_MEMBASE;
9482 return OP_LOADI8_MEMBASE;
9484 return OP_LOADR4_MEMBASE;
9486 return OP_LOADR8_MEMBASE;
9488 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CIL stind-style indirect-store opcode into the corresponding
 * typed OP_STORE*_MEMBASE_REG IR opcode. Aborts for opcodes with no
 * mapping. (Case labels elided in this fragment — see ldind_to_load_membase
 * for the mirror-image load conversion.)
 */
9495 stind_to_store_membase (int opcode)
9499 return OP_STOREI1_MEMBASE_REG;
9501 return OP_STOREI2_MEMBASE_REG;
9503 return OP_STOREI4_MEMBASE_REG;
9506 return OP_STORE_MEMBASE_REG;
9508 return OP_STOREI8_MEMBASE_REG;
9510 return OP_STORER4_MEMBASE_REG;
9512 return OP_STORER8_MEMBASE_REG;
9514 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+displacement load (..._MEMBASE) to the corresponding
 * absolute-address load (..._MEM). Only the x86 family has such
 * addressing forms; on other architectures the function is a no-op
 * (behaviour of the fallthrough path not visible in this fragment).
 */
9521 mono_load_membase_to_load_mem (int opcode)
9523 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9524 #if defined(__i386__) || defined(__x86_64__)
9526 case OP_LOAD_MEMBASE:
9528 case OP_LOADU1_MEMBASE:
9529 return OP_LOADU1_MEM;
9530 case OP_LOADU2_MEMBASE:
9531 return OP_LOADU2_MEM;
9532 case OP_LOADI4_MEMBASE:
9533 return OP_LOADI4_MEM;
9534 case OP_LOADU4_MEMBASE:
9535 return OP_LOADU4_MEM;
/* 64-bit loads only exist natively on 64-bit targets */
9536 #if SIZEOF_VOID_P == 8
9537 case OP_LOADI8_MEMBASE:
9538 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is written back to memory by
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode which fuses
 * the operation with the store (add/sub/and/or/xor with a membase
 * destination, register or immediate source). The store must be a
 * full-word store (or a 4/8-byte store on amd64) for the fusion to be
 * valid; otherwise the guard below rejects it.
 */
9547 op_to_op_dest_membase (int store_opcode, int opcode)
9549 #if defined(__i386__)
/* Only pointer-sized stores can be fused on x86 */
9550 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9555 return OP_X86_ADD_MEMBASE_REG;
9557 return OP_X86_SUB_MEMBASE_REG;
9559 return OP_X86_AND_MEMBASE_REG;
9561 return OP_X86_OR_MEMBASE_REG;
9563 return OP_X86_XOR_MEMBASE_REG;
9566 return OP_X86_ADD_MEMBASE_IMM;
9569 return OP_X86_SUB_MEMBASE_IMM;
9572 return OP_X86_AND_MEMBASE_IMM;
9575 return OP_X86_OR_MEMBASE_IMM;
9578 return OP_X86_XOR_MEMBASE_IMM;
9584 #if defined(__x86_64__)
/* amd64 additionally accepts 8-byte stores */
9585 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9590 return OP_X86_ADD_MEMBASE_REG;
9592 return OP_X86_SUB_MEMBASE_REG;
9594 return OP_X86_AND_MEMBASE_REG;
9596 return OP_X86_OR_MEMBASE_REG;
9598 return OP_X86_XOR_MEMBASE_REG;
9600 return OP_X86_ADD_MEMBASE_IMM;
9602 return OP_X86_SUB_MEMBASE_IMM;
9604 return OP_X86_AND_MEMBASE_IMM;
9606 return OP_X86_OR_MEMBASE_IMM;
9608 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
9610 return OP_AMD64_ADD_MEMBASE_REG;
9612 return OP_AMD64_SUB_MEMBASE_REG;
9614 return OP_AMD64_AND_MEMBASE_REG;
9616 return OP_AMD64_OR_MEMBASE_REG;
9618 return OP_AMD64_XOR_MEMBASE_REG;
9621 return OP_AMD64_ADD_MEMBASE_IMM;
9624 return OP_AMD64_SUB_MEMBASE_IMM;
9627 return OP_AMD64_AND_MEMBASE_IMM;
9630 return OP_AMD64_OR_MEMBASE_IMM;
9633 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store which consumes it into
 * an x86 SETcc-to-memory opcode (SETEQ/SETNE with a membase destination).
 * Only valid when the store is a 1-byte register store, since SETcc writes
 * a single byte.
 */
9643 op_to_op_store_membase (int store_opcode, int opcode)
9645 #if defined(__i386__) || defined(__x86_64__)
9648 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9649 return OP_X86_SETEQ_MEMBASE;
9651 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9652 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a memory load (LOAD_OPCODE) feeding the FIRST source operand of
 * OPCODE into a memory-operand form of the operation, so the separate load
 * can be eliminated. x86-family only; the load size must match the
 * operation width, enforced by the guards below.
 */
9660 op_to_op_src1_membase (int load_opcode, int opcode)
9663 /* FIXME: This has sign extension issues */
/* Special case: byte-sized unsigned load compared against an immediate */
9665 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9666 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only word-sized loads may be folded on x86 */
9669 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9674 return OP_X86_PUSH_MEMBASE;
9675 case OP_COMPARE_IMM:
9676 case OP_ICOMPARE_IMM:
9677 return OP_X86_COMPARE_MEMBASE_IMM;
9680 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variants */
9685 /* FIXME: This has sign extension issues */
9687 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9688 return OP_X86_COMPARE_MEMBASE8_IMM;
9693 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9694 return OP_X86_PUSH_MEMBASE;
/* Disabled: compare-with-immediate fusion is unsafe for 64-bit immediates */
9696 /* FIXME: This only works for 32 bit immediates
9697 case OP_COMPARE_IMM:
9698 case OP_LCOMPARE_IMM:
9699 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9700 return OP_AMD64_COMPARE_MEMBASE_IMM;
9702 case OP_ICOMPARE_IMM:
9703 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9704 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9708 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9709 return OP_AMD64_COMPARE_MEMBASE_REG;
9712 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9713 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a memory load (LOAD_OPCODE) feeding the SECOND source operand of
 * OPCODE into a reg,membase form of the operation (x86-family only).
 * Mirror image of op_to_op_src1_membase (); the guards ensure the load
 * width matches the operation width (4-byte loads for 32-bit ops, 8-byte
 * or pointer loads for the AMD64 64-bit ops).
 */
9722 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only word-sized loads may be folded */
9725 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9731 return OP_X86_COMPARE_REG_MEMBASE;
9733 return OP_X86_ADD_REG_MEMBASE;
9735 return OP_X86_SUB_REG_MEMBASE;
9737 return OP_X86_AND_REG_MEMBASE;
9739 return OP_X86_OR_REG_MEMBASE;
9741 return OP_X86_XOR_REG_MEMBASE;
/* amd64: load width selects between 32-bit (X86_) and 64-bit (AMD64_) forms */
9748 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9749 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9753 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9754 return OP_AMD64_COMPARE_REG_MEMBASE;
9757 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9758 return OP_X86_ADD_REG_MEMBASE;
9760 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9761 return OP_X86_SUB_REG_MEMBASE;
9763 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9764 return OP_X86_AND_REG_MEMBASE;
9766 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9767 return OP_X86_OR_REG_MEMBASE;
9769 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9770 return OP_X86_XOR_REG_MEMBASE;
9772 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9773 return OP_AMD64_ADD_REG_MEMBASE;
9775 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9776 return OP_AMD64_SUB_REG_MEMBASE;
9778 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9779 return OP_AMD64_AND_REG_MEMBASE;
9781 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9782 return OP_AMD64_OR_REG_MEMBASE;
9784 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9785 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but presumably refuses to convert opcodes
 * which the target emulates in software — long shift ops on 32-bit targets
 * and mul/div when MONO_ARCH_EMULATE_* is defined — since the emulation
 * helpers expect register operands. NOTE(review): the bodies of the #if
 * blocks are not visible in this fragment; confirm against the full source.
 */
9793 mono_op_to_op_imm_noemul (int opcode)
9796 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9801 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Everything else falls through to the normal immediate conversion */
9809 return mono_op_to_op_imm (opcode);
9814 * mono_handle_global_vregs:
9816 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9820 mono_handle_global_vregs (MonoCompile *cfg)
/*
 * Pass 1: scan every bblock and promote each vreg referenced from more than
 * one bblock to a 'global' variable (a MonoInst created with
 * mono_compile_create_var_for_vreg). Pass 2: demote variables which turned
 * out to be used in only one bblock back to plain local vregs. Pass 3:
 * compact cfg->varinfo/cfg->vars so later passes (liveness) are faster.
 *
 * vreg_to_bb [vreg] encoding: 0 = not seen yet, block_num + 1 = seen only in
 * that bblock, -1 = seen in more than one bblock (global).
 */
/* NOTE(review): 'sizeof (gint32*) * cfg->next_vreg + 1' over-allocates on
 * 64 bit (pointer size per gint32 slot) and the '+ 1' adds one byte, not one
 * element. Harmless as-is; sizeof (gint32) * (cfg->next_vreg + 1) looks
 * intended -- confirm the (elided) declaration of vreg_to_bb before changing. */
9826 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9828 /* Find local vregs used in more than one bb */
9829 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9830 MonoInst *ins = bb->code;
9831 int block_num = bb->block_num;
9833 if (cfg->verbose_level > 1)
9834 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9837 for (; ins; ins = ins->next) {
9838 const char *spec = INS_INFO (ins->opcode);
9839 int regtype, regindex;
9842 if (G_UNLIKELY (cfg->verbose_level > 1))
9843 mono_print_ins (ins);
/* Only low-level (non-CIL) opcodes are expected at this point */
9845 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest reg, 1 = first source reg, 2 = second source reg */
9847 for (regindex = 0; regindex < 3; regindex ++) {
9850 if (regindex == 0) {
9851 regtype = spec [MONO_INST_DEST];
9855 } else if (regindex == 1) {
9856 regtype = spec [MONO_INST_SRC1];
9861 regtype = spec [MONO_INST_SRC2];
9867 #if SIZEOF_VOID_P == 4
/* On 32 bit, a long lives in one 'l' vreg plus two component vregs (+1, +2) */
9868 if (regtype == 'l') {
9870 * Since some instructions reference the original long vreg,
9871 * and some reference the two component vregs, it is quite hard
9872 * to determine when it needs to be global. So be conservative.
9874 if (!get_vreg_to_inst (cfg, vreg)) {
9875 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9877 if (cfg->verbose_level > 1)
9878 printf ("LONG VREG R%d made global.\n", vreg);
9882 * Make the component vregs volatile since the optimizations can
9883 * get confused otherwise.
9885 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9886 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9890 g_assert (vreg != -1);
9892 prev_bb = vreg_to_bb [vreg];
9894 /* 0 is a valid block num */
9895 vreg_to_bb [vreg] = block_num + 1;
9896 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never promoted */
9897 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9900 if (!get_vreg_to_inst (cfg, vreg)) {
9901 if (G_UNLIKELY (cfg->verbose_level > 1))
9902 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the reg type: int, float or the ins klass for vtypes */
9906 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9909 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9912 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9915 g_assert_not_reached ();
9919 /* Flag as having been used in more than one bb */
9920 vreg_to_bb [vreg] = -1;
9926 /* If a variable is used in only one bblock, convert it into a local vreg */
9927 for (i = 0; i < cfg->num_varinfo; i++) {
9928 MonoInst *var = cfg->varinfo [i];
9929 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9931 switch (var->type) {
9937 #if SIZEOF_VOID_P == 8
9940 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9941 /* Enabling this screws up the fp stack on x86 */
9944 /* Arguments are implicitly global */
9945 /* Putting R4 vars into registers doesn't work currently */
9946 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9948 * Make sure that the variable's liveness interval doesn't contain a call, since
9949 * that would cause the lvreg to be spilled, making the whole optimization
9952 /* This is too slow for JIT compilation */
9954 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9956 int def_index, call_index, ins_index;
9957 gboolean spilled = FALSE;
9962 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9963 const char *spec = INS_INFO (ins->opcode);
9965 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9966 def_index = ins_index;
/*
 * A use of the variable through either source register. The second
 * clause used to re-test SRC1/sreg1 (copy-paste), so uses through
 * sreg2 were never detected and 'spilled' could be computed wrongly.
 */
9968 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9969 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
9970 if (call_index > def_index) {
9976 if (MONO_IS_CALL (ins))
9977 call_index = ins_index;
9987 if (G_UNLIKELY (cfg->verbose_level > 2))
9988 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg -> var mapping */
9989 var->flags |= MONO_INST_IS_DEAD;
9990 cfg->vreg_to_inst [var->dreg] = NULL;
9997 * Compress the varinfo and vars tables so the liveness computation is faster and
9998 * takes up less space.
10001 for (i = 0; i < cfg->num_varinfo; ++i) {
10002 MonoInst *var = cfg->varinfo [i];
10003 if (pos < i && cfg->locals_start == i)
10004 cfg->locals_start = pos;
10005 if (!(var->flags & MONO_INST_IS_DEAD)) {
10007 cfg->varinfo [pos] = cfg->varinfo [i];
10008 cfg->varinfo [pos]->inst_c0 = pos;
10009 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10010 cfg->vars [pos].idx = pos;
10011 #if SIZEOF_VOID_P == 4
10012 if (cfg->varinfo [pos]->type == STACK_I8) {
10013 /* Modify the two component vars too */
10016 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10017 var1->inst_c0 = pos;
10018 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10019 var1->inst_c0 = pos;
10026 cfg->num_varinfo = pos;
10027 if (cfg->locals_start > cfg->num_varinfo)
10028 cfg->locals_start = cfg->num_varinfo;
10032 * mono_spill_global_vars:
10034 * Generate spill code for variables which are not allocated to registers,
10035 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10036 * code is generated which could be optimized by the local optimization passes.
10039 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10041 MonoBasicBlock *bb;
10043 int orig_next_vreg;
/* vreg_to_lvreg [vreg] caches the lvreg currently holding the value of a
 * stack-allocated variable, so repeated loads can be elided within a bblock */
10044 guint32 *vreg_to_lvreg;
10046 guint32 i, lvregs_len;
10047 gboolean dest_has_lvreg = FALSE;
/* Maps a spec regtype char ('i'/'l'/'f') to the matching stack type */
10048 guint32 stacktypes [128];
10050 *need_local_opts = FALSE;
10052 memset (spec2, 0, sizeof (spec2));
10054 /* FIXME: Move this function to mini.c */
10055 stacktypes ['i'] = STACK_PTR;
10056 stacktypes ['l'] = STACK_I8;
10057 stacktypes ['f'] = STACK_R8;
10059 #if SIZEOF_VOID_P == 4
10060 /* Create MonoInsts for longs */
10061 for (i = 0; i < cfg->num_varinfo; i++) {
10062 MonoInst *ins = cfg->varinfo [i];
10064 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10065 switch (ins->type) {
10066 #ifdef MONO_ARCH_SOFT_FLOAT
10072 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two component vregs (+1 = LS word, +2 = MS word) stack slots
 * inside the parent long's slot */
10074 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10076 tree->opcode = OP_REGOFFSET;
10077 tree->inst_basereg = ins->inst_basereg;
10078 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10080 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10082 tree->opcode = OP_REGOFFSET;
10083 tree->inst_basereg = ins->inst_basereg;
10084 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10094 /* FIXME: widening and truncation */
10097 * As an optimization, when a variable allocated to the stack is first loaded into
10098 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10099 * the variable again.
10101 orig_next_vreg = cfg->next_vreg;
10102 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* lvregs holds the vregs with a cached lvreg, so the cache can be reset cheaply;
 * fixed capacity of 1024, guarded by asserts below */
10103 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10106 /* Add spill loads/stores */
10107 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10110 if (cfg->verbose_level > 1)
10111 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10113 /* Clear vreg_to_lvreg array */
10114 for (i = 0; i < lvregs_len; i++)
10115 vreg_to_lvreg [lvregs [i]] = 0;
10119 MONO_BB_FOR_EACH_INS (bb, ins) {
10120 const char *spec = INS_INFO (ins->opcode);
10121 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10122 gboolean store, no_lvreg;
10124 if (G_UNLIKELY (cfg->verbose_level > 1))
10125 mono_print_ins (ins);
10127 if (ins->opcode == OP_NOP)
10131 * We handle LDADDR here as well, since it can only be decomposed
10132 * when variable addresses are known.
10134 if (ins->opcode == OP_LDADDR) {
10135 MonoInst *var = ins->inst_p0;
10137 if (var->opcode == OP_VTARG_ADDR) {
10138 /* Happens on SPARC/S390 where vtypes are passed by reference */
10139 MonoInst *vtaddr = var->inst_left;
10140 if (vtaddr->opcode == OP_REGVAR) {
/* The address itself lives in a register: a plain move suffices */
10141 ins->opcode = OP_MOVE;
10142 ins->sreg1 = vtaddr->dreg;
10144 else if (var->inst_left->opcode == OP_REGOFFSET) {
/* Address is spilled to the stack: load it */
10145 ins->opcode = OP_LOAD_MEMBASE;
10146 ins->inst_basereg = vtaddr->inst_basereg;
10147 ins->inst_offset = vtaddr->inst_offset;
10151 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot becomes basereg + offset */
10153 ins->opcode = OP_ADD_IMM;
10154 ins->sreg1 = var->inst_basereg;
10155 ins->inst_imm = var->inst_offset;
10158 *need_local_opts = TRUE;
10159 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must all have been lowered by now */
10162 if (ins->opcode < MONO_CEE_LAST) {
10163 mono_print_ins (ins);
10164 g_assert_not_reached ();
10168 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10172 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg <-> sreg2 so the generic dreg/sreg handling below
 * treats the stored value as the 'dest'; swapped back further down */
10173 tmp_reg = ins->dreg;
10174 ins->dreg = ins->sreg2;
10175 ins->sreg2 = tmp_reg;
/* spec2 is the rewritten ins spec matching the swapped registers */
10178 spec2 [MONO_INST_DEST] = ' ';
10179 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10180 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10182 } else if (MONO_IS_STORE_MEMINDEX (ins))
10183 g_assert_not_reached ();
10188 if (G_UNLIKELY (cfg->verbose_level > 1))
10189 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* ---- DREG: spill the destination to its variable's home if needed ---- */
10194 regtype = spec [MONO_INST_DEST];
10195 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10198 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10199 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10200 MonoInst *store_ins;
10203 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10205 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just redirect the dreg */
10206 ins->dreg = var->dreg;
10207 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10209 * Instead of emitting a load+store, use a _membase opcode.
10211 g_assert (var->opcode == OP_REGOFFSET);
10212 if (ins->opcode == OP_MOVE) {
10215 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10216 ins->inst_basereg = var->inst_basereg;
10217 ins->inst_offset = var->inst_offset;
10220 spec = INS_INFO (ins->opcode);
10224 g_assert (var->opcode == OP_REGOFFSET);
10226 prev_dreg = ins->dreg;
10228 /* Invalidate any previous lvreg for this vreg */
10229 vreg_to_lvreg [ins->dreg] = 0;
10233 #ifdef MONO_ARCH_SOFT_FLOAT
10234 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: doubles are stored as 64 bit integers */
10236 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg, then store it to the variable's slot */
10240 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10242 if (regtype == 'l') {
/* 32 bit: store the two component vregs separately */
10243 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10244 mono_bblock_insert_after_ins (bb, ins, store_ins);
10245 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10246 mono_bblock_insert_after_ins (bb, ins, store_ins);
10249 g_assert (store_opcode != OP_STOREV_MEMBASE);
10251 /* Try to fuse the store into the instruction itself */
10252 /* FIXME: Add more instructions */
10253 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10254 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10255 ins->inst_imm = ins->inst_c0;
10256 ins->inst_destbasereg = var->inst_basereg;
10257 ins->inst_offset = var->inst_offset;
10258 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled variable becomes a direct store of the source */
10259 ins->opcode = store_opcode;
10260 ins->inst_destbasereg = var->inst_basereg;
10261 ins->inst_offset = var->inst_offset;
/* Undo the earlier dreg/sreg2 swap for store opcodes */
10265 tmp_reg = ins->dreg;
10266 ins->dreg = ins->sreg2;
10267 ins->sreg2 = tmp_reg;
10270 spec2 [MONO_INST_DEST] = ' ';
10271 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10272 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10274 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10275 // FIXME: The backends expect the base reg to be in inst_basereg
10276 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10278 ins->inst_basereg = var->inst_basereg;
10279 ins->inst_offset = var->inst_offset;
10280 spec = INS_INFO (ins->opcode);
10282 /* printf ("INS: "); mono_print_ins (ins); */
10283 /* Create a store instruction */
10284 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10286 /* Insert it after the instruction */
10287 mono_bblock_insert_after_ins (bb, ins, store_ins);
10290 * We can't assign ins->dreg to var->dreg here, since the
10291 * sregs could use it. So set a flag, and do it after
/* fp-stack architectures can't cache float lvregs; volatile/indirect vars
 * must always be re-loaded from memory */
10294 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10295 dest_has_lvreg = TRUE;
/* ---- SREGS: replace variable sregs with loads or cached lvregs ---- */
10304 for (srcindex = 0; srcindex < 2; ++srcindex) {
10305 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10306 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10308 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10309 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10310 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10311 MonoInst *load_ins;
10312 guint32 load_opcode;
10314 if (var->opcode == OP_REGVAR) {
10316 ins->sreg1 = var->dreg;
10318 ins->sreg2 = var->dreg;
10322 g_assert (var->opcode == OP_REGOFFSET);
10324 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10326 g_assert (load_opcode != OP_LOADV_MEMBASE);
10328 if (vreg_to_lvreg [sreg]) {
10329 /* The variable is already loaded to an lvreg */
10330 if (G_UNLIKELY (cfg->verbose_level > 1))
10331 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10333 ins->sreg1 = vreg_to_lvreg [sreg];
10335 ins->sreg2 = vreg_to_lvreg [sreg];
10339 /* Try to fuse the load into the instruction */
10340 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10341 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10342 ins->inst_basereg = var->inst_basereg;
10343 ins->inst_offset = var->inst_offset;
10344 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10345 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10346 ins->sreg2 = var->inst_basereg;
10347 ins->inst_offset = var->inst_offset;
10349 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
/* Load directly into the move's destination; the move itself is dead */
10350 ins->opcode = OP_NOP;
10353 //printf ("%d ", srcindex); mono_print_ins (ins);
10355 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10357 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10358 if (var->dreg == prev_dreg) {
10360 * sreg refers to the value loaded by the load
10361 * emitted below, but we need to use ins->dreg
10362 * since it refers to the store emitted earlier.
/* Remember the lvreg now caching this variable */
10366 vreg_to_lvreg [var->dreg] = sreg;
10367 g_assert (lvregs_len < 1024);
10368 lvregs [lvregs_len ++] = var->dreg;
10377 if (regtype == 'l') {
/* 32 bit long: load MS word (+2) then LS word (+1) before the instruction */
10378 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10379 mono_bblock_insert_before_ins (bb, ins, load_ins);
10380 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10381 mono_bblock_insert_before_ins (bb, ins, load_ins);
10384 #if SIZEOF_VOID_P == 4
10385 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10387 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10388 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg handling above: cache the dreg lvreg only after
 * the sregs were processed, since they could have used the old value */
10394 if (dest_has_lvreg) {
10395 vreg_to_lvreg [prev_dreg] = ins->dreg;
10396 g_assert (lvregs_len < 1024);
10397 lvregs [lvregs_len ++] = prev_dreg;
10398 dest_has_lvreg = FALSE;
/* Restore the dreg/sreg2 layout of store opcodes */
10402 tmp_reg = ins->dreg;
10403 ins->dreg = ins->sreg2;
10404 ins->sreg2 = tmp_reg;
10407 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs, so the whole cache must be invalidated */
10408 /* Clear vreg_to_lvreg array */
10409 for (i = 0; i < lvregs_len; i++)
10410 vreg_to_lvreg [lvregs [i]] = 0;
10414 if (cfg->verbose_level > 1)
10415 mono_print_ins_index (1, ins);
10422 * - use 'iadd' instead of 'int_add'
10423 * - handling ovf opcodes: decompose in method_to_ir.
10424 * - unify iregs/fregs
10425 * -> partly done, the missing parts are:
10426 * - a more complete unification would involve unifying the hregs as well, so
10427 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10428 * would no longer map to the machine hregs, so the code generators would need to
10429 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10430 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10431 * fp/non-fp branches speeds it up by about 15%.
10432 * - use sext/zext opcodes instead of shifts
10434 * - get rid of TEMPLOADs if possible and use vregs instead
10435 * - clean up usage of OP_P/OP_ opcodes
10436 * - cleanup usage of DUMMY_USE
10437 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10439 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10440 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10441 * - make sure handle_stack_args () is called before the branch is emitted
10442 * - when the new IR is done, get rid of all unused stuff
10443 * - COMPARE/BEQ as separate instructions or unify them ?
10444 * - keeping them separate allows specialized compare instructions like
10445 * compare_imm, compare_membase
10446 * - most back ends unify fp compare+branch, fp compare+ceq
10447 * - integrate handle_stack_args into inline_method
10448 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10449 * - Things to backport to the old JIT:
10450 * - op_atomic_exchange fix for amd64
10451 * - localloc fix for amd64
10452 * - x86 type_token change
10454 * - long eq/ne optimizations
10455 * - handle long shift opts on 32 bit platforms somehow: they require
10456 * 3 sregs (2 for arg1 and 1 for arg2)
10457 * - make byref a 'normal' type.
10458 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10459 * variable if needed.
10460 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10461 * like inline_method.
10462 * - remove inlining restrictions
10463 * - remove mono_save_args.
10464 * - add 'introduce a new optimization to simplify some range checks'
10465 * - fix LNEG and enable cfold of INEG
10466 * - generalize x86 optimizations like ldelema as a peephole optimization
10467 * - add store_mem_imm for amd64
10468 * - optimize the loading of the interruption flag in the managed->native wrappers
10469 * - avoid special handling of OP_NOP in passes
10470 * - move code inserting instructions into one function/macro.
10471 * - cleanup the code replacement in decompose_long_opts ()
10472 * - try a coalescing phase after liveness analysis
10473 * - add float -> vreg conversion + local optimizations on !x86
10474 * - figure out how to handle decomposed branches during optimizations, ie.
10475 * compare+branch, op_jump_table+op_br etc.
10476 * - promote RuntimeXHandles to vregs
10477 * - vtype cleanups:
10478 * - add a NEW_VARLOADA_VREG macro
10479 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10480 * accessing vtype fields.
10481 * - get rid of I8CONST on 64 bit platforms
10482 * - dealing with the increase in code size due to branches created during opcode
10484 * - use extended basic blocks
10485 * - all parts of the JIT
10486 * - handle_global_vregs () && local regalloc
10487 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10488 * - sources of increase in code size:
10491 * - isinst and castclass
10492 * - lvregs not allocated to global registers even if used multiple times
10493 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10495 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10496 * - add all micro optimizations from the old JIT
10497 * - put tree optimizations into the deadce pass
10498 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10499 * specific function.
10500 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10501 * fcompare + branchCC.
10502 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10503 * running generics.exe.
10504 * - create a helper function for allocating a stack slot, taking into account
10505 * MONO_CFG_HAS_SPILLUP.
10506 * - merge new GC changes in mini.c.
10508 * - merge the ia64 switch changes.
10509 * - merge the mips conditional changes.
10510 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10511 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10512 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10513 * - optimize mono_regstate2_alloc_int/float.
10514 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10515 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10516 * parts of the tree could be separated by other instructions, killing the tree
10517 * arguments, or stores killing loads etc. Also, should we fold loads into other
10518 * instructions if the result of the load is used multiple times ?
10519 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10520 * - LAST MERGE: 108395.
10521 * - when returning vtypes in registers, generate IR and append it to the end of the
10522 * last bb instead of doing it in the epilog.
10523 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10524 * ones in inssel.h.
10525 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10533 - When to decompose opcodes:
10534 - earlier: this makes some optimizations hard to implement, since the low level IR
10535 no longer contains the necessary information. But it is easier to do.
10536 - later: harder to implement, enables more optimizations.
10537 - Branches inside bblocks:
10538 - created when decomposing complex opcodes.
10539 - branches to another bblock: harmless, but not tracked by the branch
10540 optimizations, so need to branch to a label at the start of the bblock.
10541 - branches to inside the same bblock: very problematic, trips up the local
10542 reg allocator. Can be fixed by splitting the current bblock, but that is a
10543 complex operation, since some local vregs can become global vregs etc.
10544 - Local/global vregs:
10545 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10546 local register allocator.
10547 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10548 structure, created by mono_create_var (). Assigned to hregs or the stack by
10549 the global register allocator.
10550 - When to do optimizations like alu->alu_imm:
10551 - earlier -> saves work later on since the IR will be smaller/simpler
10552 - later -> can work on more instructions
10553 - Handling of valuetypes:
10554 - When a vtype is pushed on the stack, a new temporary is created, an
10555 instruction computing its address (LDADDR) is emitted and pushed on
10556 the stack. Need to optimize cases when the vtype is used immediately as in
10557 argument passing, stloc etc.
10558 - Instead of the to_end stuff in the old JIT, simply call the function handling
10559 the values on the stack before emitting the last instruction of the bb.