2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > -1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whenever 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 extern MonoMethodSignature *helper_sig_class_init_trampoline;
132 extern MonoMethodSignature *helper_sig_domain_get;
133 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrapper: allocate a fresh integer virtual register for 'cfg'
 * (delegates to the static alloc_ireg helper). */
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point virtual register. */
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized virtual register. */
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
/* Public wrapper: allocate a destination vreg matching the eval-stack
 * type 'stack_type' (int/long/float/pointer/...). */
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *   Maps 'type' to the move opcode used when copying a value of that type
 *   between virtual registers. Enums decay to their underlying integral
 *   type and generic instances to their container class before re-checking.
 *   NOTE(review): this view of the file is sampled — several case labels
 *   and return statements between the visible lines are missing here.
 */
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
241 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: prints basic block 'bb' prefixed by 'msg' — its incoming
 *   and outgoing edges (block number plus dfn) and then every instruction
 *   in the block via mono_print_ins_index.
 */
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
282 #define CHECK_BBLOCK(target,ip,tblock) do { \
283 if ((target) < (ip) && !(tblock)->code) { \
284 bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
285 if (cfg->verbose_level > 2) printf ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
289 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
290 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
291 int _length_reg = alloc_ireg (cfg); \
292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
293 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
294 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
298 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
299 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
300 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
303 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
304 ins->sreg1 = array_reg; \
305 ins->sreg2 = index_reg; \
306 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
307 MONO_ADD_INS ((cfg)->cbb, ins); \
308 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
309 (cfg)->cbb->has_array_access = TRUE; \
313 #if defined(__i386__) || defined(__x86_64__)
314 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
315 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
316 (dest)->dreg = alloc_preg ((cfg)); \
317 (dest)->sreg1 = (sr1); \
318 (dest)->sreg2 = (sr2); \
319 (dest)->inst_imm = (imm); \
320 (dest)->backend.shift_amount = (shift); \
321 MONO_ADD_INS ((cfg)->cbb, (dest)); \
325 #if SIZEOF_VOID_P == 8
326 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
327 /* FIXME: Need to add many more cases */ \
328 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
330 int dr = alloc_preg (cfg); \
331 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
332 (ins)->sreg2 = widen->dreg; \
336 #define ADD_WIDEN_OP(ins, arg1, arg2)
339 #define ADD_BINOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 ins->sreg2 = sp [1]->dreg; \
344 type_from_op (ins, sp [0], sp [1]); \
346 /* Have to insert a widening op */ \
347 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
348 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 mono_decompose_opcode ((cfg), (ins)); \
354 #define ADD_UNOP(op) do { \
355 MONO_INST_NEW (cfg, ins, (op)); \
357 ins->sreg1 = sp [0]->dreg; \
358 type_from_op (ins, sp [0], NULL); \
360 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
361 MONO_ADD_INS ((cfg)->cbb, (ins)); \
363 mono_decompose_opcode (cfg, ins); \
366 #define ADD_BINCOND(next_block) do { \
369 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
370 cmp->sreg1 = sp [0]->dreg; \
371 cmp->sreg2 = sp [1]->dreg; \
372 type_from_op (cmp, sp [0], sp [1]); \
374 type_from_op (ins, sp [0], sp [1]); \
375 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
376 GET_BBLOCK (cfg, tblock, target); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_true_bb = tblock; \
379 CHECK_BBLOCK (target, ip, tblock); \
380 if ((next_block)) { \
381 link_bblock (cfg, bblock, (next_block)); \
382 ins->inst_false_bb = (next_block); \
383 start_new_bblock = 1; \
385 GET_BBLOCK (cfg, tblock, ip); \
386 link_bblock (cfg, bblock, tblock); \
387 ins->inst_false_bb = tblock; \
388 start_new_bblock = 2; \
390 if (sp != stack_start) { \
391 handle_stack_args (cfg, stack_start, sp - stack_start); \
392 CHECK_UNVERIFIABLE (cfg); \
394 MONO_ADD_INS (bblock, cmp); \
395 MONO_ADD_INS (bblock, ins); \
399 * link_bblock: Links two basic blocks
401 * links two basic blocks in the control flow graph, the 'from'
402 * argument is the starting block and the 'to' argument is the block
403 * the control flow ends to after 'from'.
/* Adds the CFG edge from->to, keeping from->out_bb and to->in_bb in sync
 * and avoiding duplicate edges. Arrays are grown via the compile mempool. */
406 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
408 MonoBasicBlock **newa;
/* Optional debug trace; the entry/exit blocks have no cil_code. */
412 if (from->cil_code) {
414 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
416 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
419 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
421 printf ("edge from entry to exit\n");
/* Nothing to do when 'to' is already a successor of 'from'. */
426 for (i = 0; i < from->out_count; ++i) {
427 if (to == from->out_bb [i]) {
/* Grow the successor array by one slot (old array stays in the mempool). */
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
434 for (i = 0; i < from->out_count; ++i) {
435 newa [i] = from->out_bb [i];
/* Mirror the edge on the predecessor side, again skipping duplicates. */
443 for (i = 0; i < to->in_count; ++i) {
444 if (from == to->in_bb [i]) {
450 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
451 for (i = 0; i < to->in_count; ++i) {
452 newa [i] = to->in_bb [i];
/* Public wrapper over the static link_bblock (see above). */
461 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
463 link_bblock (cfg, from, to);
467 * mono_find_block_region:
469 * We mark each basic block with a region ID. We use that to avoid BB
470 * optimizations when blocks are in different regions.
473 * A region token that encodes where this region is, and information
474 * about the clause owner for this block.
476 * The region encodes the try/catch/filter clause that owns this block
477 * as well as the type. -1 is a special value that represents a block
478 * that is in none of try/catch/filter.
481 mono_find_block_region (MonoCompile *cfg, int offset)
483 MonoMethod *method = cfg->method;
484 MonoMethodHeader *header = mono_method_get_header (method);
485 MonoExceptionClause *clause;
488 /* first search for handlers and filters */
489 for (i = 0; i < header->num_clauses; ++i) {
490 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
491 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
492 (offset < (clause->handler_offset)))
493 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
495 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
496 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
497 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
498 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
499 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else inside a handler is treated as a catch region. */
501 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
505 /* search the try blocks */
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
508 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
509 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Builds a GList of handler basic blocks for every clause of kind 'type'
 *   whose protected region contains 'ip' but not 'target' — i.e. the
 *   handlers that must run when control transfers from 'ip' to 'target'.
 */
516 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
518 MonoMethod *method = cfg->method;
519 MonoMethodHeader *header = mono_method_get_header (method);
520 MonoExceptionClause *clause;
521 MonoBasicBlock *handler;
525 for (i = 0; i < header->num_clauses; ++i) {
526 clause = &header->clauses [i];
/* The branch leaves this clause's try region... */
527 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
528 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
529 if (clause->flags == type) {
530 handler = cfg->cil_offset_to_bb [clause->handler_offset];
532 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *   Returns the spvar (int-sized local) associated with 'region', creating
 *   it and caching it in cfg->spvars on first use.
 */
540 mono_create_spvar_for_region (MonoCompile *cfg, int region)
544 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
548 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable cached for the
 * given IL 'offset'; NULL if none exists yet. */
556 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
558 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Returns the object-typed local holding the exception at IL 'offset',
 *   creating it and caching it in cfg->exvars on first use.
 */
562 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
566 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
570 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
571 /* prevent it from being register allocated */
572 var->flags |= MONO_INST_INDIRECT;
574 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * find_previous:
 *   Scans 'bblocks' for the block whose cil_code is closest to — but still
 *   strictly before — 'code', starting the comparison from 'start'.
 */
579 static MonoBasicBlock*
580 find_previous (MonoBasicBlock **bblocks, guint32 n_bblocks, MonoBasicBlock *start, const guchar *code)
582 MonoBasicBlock *best = start;
585 for (i = 0; i < n_bblocks; ++i) {
587 MonoBasicBlock *bb = bblocks [i];
589 if (bb->cil_code && bb->cil_code < code && bb->cil_code > best->cil_code)
/*
 * split_bblock:
 *   Splits 'first' at the IL offset where 'second' begins: 'second' inherits
 *   first's out-edges, 'first' keeps only an edge to 'second', and the
 *   instruction list is cut at the first instruction whose cil_code reaches
 *   second->cil_code. Warns if no split point is found.
 */
598 split_bblock (MonoCompile *cfg, MonoBasicBlock *first, MonoBasicBlock *second) {
607 * FIXME: take into account all the details:
608 * second may have been the target of more than one bblock
/* Transfer first's successors (and retarget their in_bb back-pointers). */
610 second->out_count = first->out_count;
611 second->out_bb = first->out_bb;
613 for (i = 0; i < first->out_count; ++i) {
614 bb = first->out_bb [i];
615 for (j = 0; j < bb->in_count; ++j) {
616 if (bb->in_bb [j] == first)
617 bb->in_bb [j] = second;
621 first->out_count = 0;
622 first->out_bb = NULL;
623 link_bblock (cfg, first, second);
625 second->last_ins = first->last_ins;
627 /*printf ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
/* Find the instruction boundary matching second's IL start and cut there. */
628 for (inst = first->code; inst && inst->next; inst = inst->next) {
629 /*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
630 printf ("found %p: %s", inst->next->cil_code, code);
632 if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
633 second->code = inst->next;
635 first->last_ins = inst;
636 second->next_bb = first->next_bb;
637 first->next_bb = second;
642 g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 *   Sets inst->type/inst->klass to the eval-stack category (STACK_I4,
 *   STACK_OBJ, STACK_VTYPE, ...) corresponding to 'type'. Enums decay to
 *   their underlying type, generic instances to their container class.
 *   NOTE(review): sampled view — some case labels and break statements
 *   between the visible lines are missing here.
 */
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
657 inst->type = STACK_MP;
658 inst->klass = mono_defaults.object_class;
662 inst->klass = klass = mono_class_from_mono_type (type);
665 switch (type->type) {
667 inst->type = STACK_INV;
671 case MONO_TYPE_BOOLEAN:
677 inst->type = STACK_I4;
682 case MONO_TYPE_FNPTR:
683 inst->type = STACK_PTR;
685 case MONO_TYPE_CLASS:
686 case MONO_TYPE_STRING:
687 case MONO_TYPE_OBJECT:
688 case MONO_TYPE_SZARRAY:
689 case MONO_TYPE_ARRAY:
690 inst->type = STACK_OBJ;
694 inst->type = STACK_I8;
698 inst->type = STACK_R8;
700 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
701 if (type->data.klass->enumtype) {
702 type = type->data.klass->enum_basetype;
706 inst->type = STACK_VTYPE;
709 case MONO_TYPE_TYPEDBYREF:
710 inst->klass = mono_defaults.typed_reference_class;
711 inst->type = STACK_VTYPE;
713 case MONO_TYPE_GENERICINST:
714 type = &type->data.generic_class->container_class->byval_arg;
717 case MONO_TYPE_MVAR :
718 /* FIXME: all the arguments must be references for now,
719 * later look inside cfg and see if the arg num is
/* Type variables are only legal here under generic sharing. */
722 g_assert (cfg->generic_sharing_context);
723 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 *   Computes ins->type (the resulting eval-stack category) from the opcode
 *   and the stack types of src1/src2, and specializes the generic opcode
 *   into its type-specific variant by adding the per-stack-type offset from
 *   the *_op_map tables above. Invalid operand combinations yield STACK_INV.
 *   NOTE(review): sampled view — case labels and break statements between
 *   the visible lines are missing here.
 */
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table. */
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor...). */
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted operand. */
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* Compares: pick long/float/int compare from the operand width. */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-compare opcodes. */
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not) and the many conv.* variants follow. */
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
/* Pointer-sized conversion: opcode depends on source width and word size. */
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_MOVE;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
/* Memory loads already carry their width in the opcode; just set type. */
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a conservative klass. */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the int-sized local caching the MonoDomain* (see the
 * comment above this function). */
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1143 inline static MonoInst *
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
/* Only AOT compilation needs a GOT variable. */
1147 if (!cfg->compile_aot)
/* Lazily create the int-sized local holding the GOT address. */
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/* Returns the rgctx variable (only valid under generic sharing), creating
 * it lazily; it is forced onto the stack via MONO_INST_INDIRECT. */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1169 return cfg->rgctx_var;
/* Maps an eval-stack category back to the MonoType used to declare a
 * variable capable of holding values of that category. */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to montype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Maps a MonoType (after stripping enums via
 *   mono_type_get_underlying_type) to its eval-stack category.
 *   NOTE(review): sampled view — the return statements between the visible
 *   case labels are missing here.
 */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 switch (mono_type_get_underlying_type (t)->type) {
1195 case MONO_TYPE_BOOLEAN:
1198 case MONO_TYPE_CHAR:
1205 case MONO_TYPE_FNPTR:
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_VALUETYPE:
1220 case MONO_TYPE_TYPEDBYREF:
1222 case MONO_TYPE_GENERICINST:
1223 if (mono_type_generic_inst_is_valuetype (t))
1229 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Maps an ldelem/stelem CIL opcode to the MonoClass of the array element
 *   it accesses. Aborts on unexpected opcodes.
 *   NOTE(review): sampled view — most case labels preceding the visible
 *   return statements are missing here.
 */
1236 array_access_to_klass (int opcode)
1240 return mono_defaults.byte_class;
1242 return mono_defaults.uint16_class;
1245 return mono_defaults.int_class;
1248 return mono_defaults.sbyte_class;
1251 return mono_defaults.int16_class;
1254 return mono_defaults.int32_class;
1256 return mono_defaults.uint32_class;
1259 return mono_defaults.int64_class;
1262 return mono_defaults.single_class;
1265 return mono_defaults.double_class;
1266 case CEE_LDELEM_REF:
1267 case CEE_STELEM_REF:
1268 return mono_defaults.object_class;
1270 g_assert_not_reached ();
1276 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Returns a local variable suitable for spilling the eval-stack entry
 *   'ins' at stack depth 'slot', reusing a cached one from cfg->intvars
 *   when the (slot, stack-type) pair was seen before.
 */
1279 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1284 /* inlining can result in deeper stacks */
1285 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1286 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1288 pos = ins->type - 1 + slot * STACK_MAX;
1290 switch (ins->type) {
1297 if ((vnum = cfg->intvars [pos]))
1298 return cfg->varinfo [vnum];
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1300 cfg->intvars [pos] = res->inst_c0;
1303 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   When compiling AOT, records the (image, token) pair for 'key' in
 *   cfg->token_info_hash; a no-op for regular JIT compilation.
 */
1309 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1311 if (cfg->compile_aot) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single joint point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the joint point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpouse, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
1400 locals = bb->out_stack;
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
1428 if (outb->in_stack != locals) {
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1465 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1466 * stored in "klass_reg" implements the interface "klass".
1469 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1471 int ibitmap_reg = alloc_preg (cfg);
1472 int ibitmap_byte_reg = alloc_preg (cfg);
1474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1476 if (cfg->compile_aot) {
1477 int iid_reg = alloc_preg (cfg);
1478 int shifted_iid_reg = alloc_preg (cfg);
1479 int ibitmap_byte_address_reg = alloc_preg (cfg);
1480 int masked_iid_reg = alloc_preg (cfg);
1481 int iid_one_bit_reg = alloc_preg (cfg);
1482 int iid_bit_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1488 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1489 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1490 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1498 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1499 * stored in "vtable_reg" implements the interface "klass".
1502 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1504 int ibitmap_reg = alloc_preg (cfg);
1505 int ibitmap_byte_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1509 if (cfg->compile_aot) {
1510 int iid_reg = alloc_preg (cfg);
1511 int shifted_iid_reg = alloc_preg (cfg);
1512 int ibitmap_byte_address_reg = alloc_preg (cfg);
1513 int masked_iid_reg = alloc_preg (cfg);
1514 int iid_one_bit_reg = alloc_preg (cfg);
1515 int iid_bit_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1521 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1522 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1523 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1531 * Emit code which checks whenever the interface id of @klass is smaller than
1532 * than the value given by max_iid_reg.
1535 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1536 MonoBasicBlock *false_target)
1538 if (cfg->compile_aot) {
1539 int iid_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1551 /* Same as above, but obtains max_iid from a vtable */
1553 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1554 MonoBasicBlock *false_target)
1556 int max_iid_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1559 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1562 /* Same as above, but obtains max_iid from a klass */
1564 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1565 MonoBasicBlock *false_target)
1567 int max_iid_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1570 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int idepth_reg = alloc_preg (cfg);
1577 int stypes_reg = alloc_preg (cfg);
1578 int stype = alloc_preg (cfg);
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1587 if (cfg->compile_aot) {
1588 int const_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1590 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1598 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1600 int intf_reg = alloc_preg (cfg);
1602 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1603 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1608 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1612 * Variant of the above that takes a register to the class, not the vtable.
1615 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1617 int intf_bit_reg = alloc_preg (cfg);
1619 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1620 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1625 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1629 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1631 if (cfg->compile_aot) {
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1642 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1644 if (cfg->compile_aot) {
1645 int const_reg = alloc_preg (cfg);
1646 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1655 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1658 int rank_reg = alloc_preg (cfg);
1659 int eclass_reg = alloc_preg (cfg);
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1663 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1664 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1666 if (klass->cast_class == mono_defaults.object_class) {
1667 int parent_reg = alloc_preg (cfg);
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1669 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1670 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1671 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1672 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1673 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1674 } else if (klass->cast_class == mono_defaults.enum_class) {
1675 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1676 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1677 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1679 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1682 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1683 /* Check that the object is a vector too */
1684 int bounds_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1687 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1690 int idepth_reg = alloc_preg (cfg);
1691 int stypes_reg = alloc_preg (cfg);
1692 int stype = alloc_preg (cfg);
1694 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1697 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1701 mini_emit_class_check (cfg, stype, klass);
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1710 g_assert (val == 0);
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_VOID_P == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1731 val_reg = alloc_preg (cfg);
1733 if (sizeof (gpointer) == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1739 /* This could be optimized further if neccesary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (sizeof (gpointer) == 8) {
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1781 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1786 /* This could be optimized further if neccesary */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1797 #if !NO_UNALIGNED_ACCESS
1798 if (sizeof (gpointer) == 8) {
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1837 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1839 int vtable_reg = alloc_preg (cfg);
1841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1843 if (cfg->opt & MONO_OPT_SHARED) {
1844 int class_reg = alloc_preg (cfg);
1845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1846 if (cfg->compile_aot) {
1847 int klass_reg = alloc_preg (cfg);
1848 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1849 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1854 if (cfg->compile_aot) {
1855 int vt_reg = alloc_preg (cfg);
1856 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1857 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
1867 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1870 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 type = mini_get_basic_type_from_generic (gsctx, type);
1874 switch (type->type) {
1875 case MONO_TYPE_VOID:
1876 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1879 case MONO_TYPE_BOOLEAN:
1882 case MONO_TYPE_CHAR:
1885 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1889 case MONO_TYPE_FNPTR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1902 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1903 case MONO_TYPE_VALUETYPE:
1904 if (type->data.klass->enumtype) {
1905 type = type->data.klass->enum_basetype;
1908 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1909 case MONO_TYPE_TYPEDBYREF:
1910 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1911 case MONO_TYPE_GENERICINST:
1912 type = &type->data.generic_class->container_class->byval_arg;
1915 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1921 * target_type_is_incompatible:
1922 * @cfg: MonoCompile context
1924 * Check that the item @arg on the evaluation stack can be stored
1925 * in the target type (can be a local, or field, etc).
1926 * The cfg arg can be used to check if we need verification or just
1929 * Returns: non-0 value if arg can't be stored on a target.
1932 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1934 MonoType *simple_type;
1937 if (target->byref) {
1938 /* FIXME: check that the pointed to types match */
1939 if (arg->type == STACK_MP)
1940 return arg->klass != mono_class_from_mono_type (target);
1941 if (arg->type == STACK_PTR)
1946 simple_type = mono_type_get_underlying_type (target);
1947 switch (simple_type->type) {
1948 case MONO_TYPE_VOID:
1952 case MONO_TYPE_BOOLEAN:
1955 case MONO_TYPE_CHAR:
1958 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1962 /* STACK_MP is needed when setting pinned locals */
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1968 case MONO_TYPE_FNPTR:
1969 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 if (arg->type != STACK_OBJ)
1979 /* FIXME: check type compatibility */
1983 if (arg->type != STACK_I8)
1988 if (arg->type != STACK_R8)
1991 case MONO_TYPE_VALUETYPE:
1992 if (arg->type != STACK_VTYPE)
1994 klass = mono_class_from_mono_type (simple_type);
1995 if (klass != arg->klass)
1998 case MONO_TYPE_TYPEDBYREF:
1999 if (arg->type != STACK_VTYPE)
2001 klass = mono_class_from_mono_type (simple_type);
2002 if (klass != arg->klass)
2005 case MONO_TYPE_GENERICINST:
2006 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2007 if (arg->type != STACK_VTYPE)
2009 klass = mono_class_from_mono_type (simple_type);
2010 if (klass != arg->klass)
2014 if (arg->type != STACK_OBJ)
2016 /* FIXME: check type compatibility */
2020 case MONO_TYPE_MVAR:
2021 /* FIXME: all the arguments must be references for now,
2022 * later look inside cfg and see if the arg num is
2023 * really a reference
2025 g_assert (cfg->generic_sharing_context);
2026 if (arg->type != STACK_OBJ)
2030 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2036 * Prepare arguments for passing to a function call.
2037 * Return a non-zero value if the arguments can't be passed to the given
2039 * The type checks are not yet complete and some conversions may need
2040 * casts on 32 or 64 bit architectures.
2042 * FIXME: implement this using target_type_is_incompatible ()
2045 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2047 MonoType *simple_type;
2051 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2055 for (i = 0; i < sig->param_count; ++i) {
2056 if (sig->params [i]->byref) {
2057 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2061 simple_type = sig->params [i];
2062 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2064 switch (simple_type->type) {
2065 case MONO_TYPE_VOID:
2070 case MONO_TYPE_BOOLEAN:
2073 case MONO_TYPE_CHAR:
2076 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2082 case MONO_TYPE_FNPTR:
2083 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2086 case MONO_TYPE_CLASS:
2087 case MONO_TYPE_STRING:
2088 case MONO_TYPE_OBJECT:
2089 case MONO_TYPE_SZARRAY:
2090 case MONO_TYPE_ARRAY:
2091 if (args [i]->type != STACK_OBJ)
2096 if (args [i]->type != STACK_I8)
2101 if (args [i]->type != STACK_R8)
2104 case MONO_TYPE_VALUETYPE:
2105 if (simple_type->data.klass->enumtype) {
2106 simple_type = simple_type->data.klass->enum_basetype;
2109 if (args [i]->type != STACK_VTYPE)
2112 case MONO_TYPE_TYPEDBYREF:
2113 if (args [i]->type != STACK_VTYPE)
2116 case MONO_TYPE_GENERICINST:
2117 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2121 g_error ("unknown type 0x%02x in check_call_signature",
2129 callvirt_to_call (int opcode)
2134 case OP_VOIDCALLVIRT:
2143 g_assert_not_reached ();
2150 callvirt_to_call_membase (int opcode)
2154 return OP_CALL_MEMBASE;
2155 case OP_VOIDCALLVIRT:
2156 return OP_VOIDCALL_MEMBASE;
2158 return OP_FCALL_MEMBASE;
2160 return OP_LCALL_MEMBASE;
2162 return OP_VCALL_MEMBASE;
2164 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Emit the hidden IMT argument (the target method, or @imt_arg when supplied)
 * for an interface call, either into MONO_ARCH_IMT_REG or via the
 * architecture-specific hook.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (cfg->compile_aot) {
		/* The method address must be patched in at load time. */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else {
		MonoInst *ins;

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call);
#endif
}
#endif
2196 inline static MonoInst*
2197 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2199 inline static MonoCallInst *
2200 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2201 MonoInst **args, int calli, int virtual)
2204 #ifdef MONO_ARCH_SOFT_FLOAT
2208 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2211 call->signature = sig;
2213 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2215 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2216 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2219 temp->backend.is_pinvoke = sig->pinvoke;
2222 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2223 * address of return value to increase optimization opportunities.
2224 * Before vtype decomposition, the dreg of the call ins itself represents the
2225 * fact the call modifies the return value. After decomposition, the call will
2226 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2227 * will be transformed into an LDADDR.
2229 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2230 loada->dreg = alloc_preg (cfg);
2231 loada->inst_p0 = temp;
2232 /* We reference the call too since call->dreg could change during optimization */
2233 loada->inst_p1 = call;
2234 MONO_ADD_INS (cfg->cbb, loada);
2236 call->inst.dreg = temp->dreg;
2238 call->vret_var = loada;
2239 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2240 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2242 #ifdef MONO_ARCH_SOFT_FLOAT
2244 * If the call has a float argument, we would need to do an r8->r4 conversion using
2245 * an icall, but that cannot be done during the call sequence since it would clobber
2246 * the call registers + the stack. So we do it before emitting the call.
2248 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2250 MonoInst *in = call->args [i];
2252 if (i >= sig->hasthis)
2253 t = sig->params [i - sig->hasthis];
2255 t = &mono_defaults.int_class->byval_arg;
2256 t = mono_type_get_underlying_type (t);
2258 if (!t->byref && t->type == MONO_TYPE_R4) {
2259 MonoInst *iargs [1];
2263 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2265 /* The result will be in an int vreg */
2266 call->args [i] = conv;
2271 mono_arch_emit_call (cfg, call);
2273 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2274 cfg->flags |= MONO_CFG_HAS_CALLS;
2279 inline static MonoInst*
2280 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2282 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2284 call->inst.sreg1 = addr->dreg;
2286 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2288 return (MonoInst*)call;
2291 inline static MonoInst*
2292 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2294 #ifdef MONO_ARCH_RGCTX_REG
2296 int rgctx_reg = mono_alloc_preg (cfg);
2298 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2299 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2300 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2301 return (MonoInst*)call;
2303 g_assert_not_reached ();
2309 mono_emit_imt_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2310 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2312 gboolean virtual = this != NULL;
2313 gboolean enable_for_aot = TRUE;
2316 if (method->string_ctor) {
2317 /* Create the real signature */
2318 /* FIXME: Cache these */
2319 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup (sig);
2320 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2325 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2327 if (this && sig->hasthis &&
2328 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2329 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2330 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2332 call->method = method;
2334 call->inst.flags |= MONO_INST_HAS_METHOD;
2335 call->inst.inst_left = this;
2338 int vtable_reg, slot_reg, this_reg;
2340 this_reg = this->dreg;
2342 if ((!cfg->compile_aot || enable_for_aot) &&
2343 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2344 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2345 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2347 * the method is not virtual, we just need to ensure this is not null
2348 * and then we can call the method directly.
2350 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2351 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2354 if (!method->string_ctor) {
2355 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2356 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2357 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2360 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2362 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2364 return (MonoInst*)call;
2367 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2368 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2369 /* Make a call to delegate->invoke_impl */
2370 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2371 call->inst.inst_basereg = this_reg;
2372 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2373 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2375 return (MonoInst*)call;
2379 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2380 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2381 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2383 * the method is virtual, but we can statically dispatch since either
2384 * it's class or the method itself are sealed.
2385 * But first we need to ensure it's not a null reference.
2387 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2388 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2389 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2391 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2392 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2394 return (MonoInst*)call;
2397 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2399 /* Initialize method->slot */
2400 mono_class_setup_vtable (method->klass);
2402 vtable_reg = alloc_preg (cfg);
2403 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2404 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2406 #ifdef MONO_ARCH_HAVE_IMT
2408 guint32 imt_slot = mono_method_get_imt_slot (method);
2409 emit_imt_argument (cfg, call, imt_arg);
2410 slot_reg = vtable_reg;
2411 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2414 if (slot_reg == -1) {
2415 slot_reg = alloc_preg (cfg);
2416 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2417 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
2420 slot_reg = vtable_reg;
2421 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2424 call->inst.sreg1 = slot_reg;
2425 call->virtual = TRUE;
2428 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2430 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper around mono_emit_imt_method_call () for the common case
 * where no IMT argument is needed (passes NULL as the imt_arg).
 */
2433 static inline MonoInst*
2434 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2435 MonoInst **args, MonoInst *this)
2437 return mono_emit_imt_method_call (cfg, method, sig, args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to native code at address FUNC with signature SIG.
 * Builds the call instruction via mono_emit_call_args () (non-virtual,
 * non-tail: both boolean args are FALSE), appends it to the current bblock
 * and returns it as a MonoInst*.
 * NOTE(review): extraction dropped some lines of this function (e.g. the
 * line binding FUNC to the call) — comments describe the visible code only.
 */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for the native address FUNC.
 * Looks up the icall info by address, then calls through the icall wrapper
 * so the usual wrapper bookkeeping applies.
 */
2456 inline static MonoInst*
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * get_memcpy_method:
 * Return the (cached) corlib String.memcpy(dest, src, n) helper method.
 * Aborts with g_error if the running corlib is too old to provide it.
 * The cache is a function-local static; lookup happens once per process.
 */
2467 get_memcpy_method (void)
2469 static MonoMethod *memcpy_method = NULL;
2470 if (!memcpy_method) {
2471 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2473 g_error ("Old corlib found. Install a new one");
2475 return memcpy_method;
2479 * Emit code to copy a valuetype of type @klass whose address is stored in
2480 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * When @native is set, the native (marshalled) size of the class is used,
 * otherwise the managed value size. Small copies (<= 5 pointers) are
 * inlined via mini_emit_memcpy2 when MONO_OPT_INTRINS is on; larger ones
 * call the corlib memcpy helper.
 */
2483 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2485 MonoInst *iargs [3];
2488 MonoMethod *memcpy_method;
2492 * This check breaks with spilled vars... need to handle it during verification anyway.
2493 * g_assert (klass && klass == src->klass && klass == dest->klass);
2497 n = mono_class_native_size (klass, &align);
2499 n = mono_class_value_size (klass, &align);
2501 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2502 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2503 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* iargs [2] is the byte count; iargs [0]/[1] (dest, src) are set on lines not visible here */
2507 EMIT_NEW_ICONST (cfg, iargs [2], n);
2509 memcpy_method = get_memcpy_method ();
/* NOTE(review): uses memcpy_method->signature directly rather than
 * mono_method_signature () as other call sites do — confirm intentional. */
2510 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
/*
 * get_memset_method:
 * Return the (cached) corlib String.memset(dest, value, n) helper method.
 * Aborts with g_error if the running corlib does not provide it.
 */
2515 get_memset_method (void)
2517 static MonoMethod *memset_method = NULL;
2518 if (!memset_method) {
2519 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2521 g_error ("Old corlib found. Install a new one");
2523 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg (CIL initobj). Small types (<= 5 pointers) are zeroed inline
 * with mini_emit_memset; larger ones call the corlib memset helper with
 * value 0 and size n.
 */
2527 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2529 MonoInst *iargs [3];
2532 MonoMethod *memset_method;
2534 /* FIXME: Optimize this for the case when dest is an LDADDR */
2536 mono_class_init (klass);
2537 n = mono_class_value_size (klass, &align);
2539 if (n <= sizeof (gpointer) * 5) {
2540 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2543 memset_method = get_memset_method ();
/* iargs [0] (dest) is set on a line not visible here; [1] = fill byte, [2] = size */
2545 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2546 EMIT_NEW_ICONST (cfg, iargs [2], n);
2547 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that loads the runtime generic context (rgctx) for METHOD,
 * according to how the generic context is shared (CONTEXT_USED flags):
 *  - method-inst sharing: load the MRGCTX from the vtable var;
 *  - static methods: load the vtable var (and, for inflated methods with a
 *    method_inst, indirect through MonoMethodRuntimeGenericContext.class_vtable);
 *  - otherwise: load the vtable pointer out of the `this' argument.
 * Valuetype methods are not supported (asserted).
 * NOTE(review): extraction dropped the return statements / else arms;
 * comments describe only the visible lines.
 */
2552 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2554 MonoInst *this = NULL;
2556 g_assert (!method->klass->valuetype);
/* Non-static methods that don't share on the method inst need `this' (arg 0) */
2558 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
2559 EMIT_NEW_ARGLOAD (cfg, this, 0);
2561 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2562 MonoInst *mrgctx_loc, *mrgctx_var;
2565 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2567 mrgctx_loc = mono_get_vtable_var (cfg);
2568 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2571 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2572 MonoInst *vtable_loc, *vtable_var;
2576 vtable_loc = mono_get_vtable_var (cfg);
2577 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2579 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The vtable var actually holds an MRGCTX here; fetch the class vtable out of it */
2580 MonoInst *mrgctx_var = vtable_var;
2583 vtable_reg = alloc_preg (cfg);
2584 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2585 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable pointer from the object header of `this' */
2591 int vtable_reg, res_reg;
2593 vtable_reg = alloc_preg (cfg);
2594 res_reg = alloc_preg (cfg);
2595 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Convenience macro: bail out of the current opcode for valuetype methods
 * (generic sharing unsupported there), then load the rgctx into (rgctx).
 * Expects `cfg', `method' and `ip' to be in scope at the expansion site. */
2600 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2601 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2602 (rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * emit_get_rgctx_other_table_ptr:
 * Emit a call to the per-slot lazy-fetch trampoline, which resolves SLOT of
 * the rgctx pointed to by RGC_PTR on first use. Returns the fetched value
 * as a MonoInst*.
 */
2606 emit_get_rgctx_other_table_ptr (MonoCompile *cfg, MonoInst *rgc_ptr, int slot)
2608 MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
2609 guint8 *tramp = mini_create_rgctx_lazy_fetch_trampoline (slot);
2611 return mono_emit_native_call (cfg, tramp, sig, &rgc_ptr);
/*
 * emit_get_rgctx_klass:
 * Register (or look up) an rgctx slot holding RGCTX_TYPE info for KLASS in
 * the current generic context, then emit the lazy fetch of that slot.
 */
2615 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2616 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2618 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2619 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, &klass->byval_arg, rgctx_type, cfg->generic_context);
2621 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method:
 * Same as emit_get_rgctx_klass but the registered info is a method
 * (CMETHOD) instead of a class.
 */
2625 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2626 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2628 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2629 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, cmethod, rgctx_type, cfg->generic_context);
2631 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_field:
 * Same as emit_get_rgctx_klass but the registered info is a field (FIELD).
 */
2635 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2636 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2638 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2639 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, field, rgctx_type, cfg->generic_context);
2641 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method_rgctx:
 * Fetch the MRGCTX belonging to RGCTX_METHOD via an rgctx slot of type
 * MONO_RGCTX_INFO_METHOD_RGCTX.
 */
2645 emit_get_rgctx_method_rgctx (MonoCompile *cfg, int context_used,
2646 MonoInst *rgctx, MonoMethod *rgctx_method)
2648 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2649 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, rgctx_method,
2650 MONO_RGCTX_INFO_METHOD_RGCTX, cfg->generic_context);
2652 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
2656 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
/*
 * is being compiled: the Unbox method address is fetched from the rgctx and
 * called indirectly; otherwise Nullable`1.Unbox is called directly.
 * NOTE: encoding a method ref is not supported, so AOT is disabled here.
 */
2660 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
2662 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2663 // Can't encode method ref
2664 cfg->disable_aot = TRUE;
/* Shared generic code: resolve the target through the rgctx and call indirect */
2667 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2668 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2670 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2672 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
/*
 * handle_unbox:
 * Emit the CIL `unbox' sequence for KLASS on the object at sp [0]:
 *  1. load the vtable and check rank == 0 (arrays can't be unboxed);
 *  2. check the boxed element class matches KLASS's element class,
 *     either against an rgctx-resolved class (shared generics) or a
 *     compile-time class check, throwing InvalidCastException on mismatch;
 *  3. return the address of the value: obj + sizeof (MonoObject).
 */
2677 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2681 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2682 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2683 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2684 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2686 obj_reg = sp [0]->dreg;
2687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2690 /* FIXME: generics */
2691 g_assert (klass->rank == 0);
/* rank must be 0: can't unbox an array object */
2694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2695 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic path: compare against the rgctx-resolved element class */
2701 MonoInst *element_class;
2703 /* This assertion is from the unboxcast insn */
2704 g_assert (klass->rank == 0);
2706 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2707 klass->element_class, MONO_RGCTX_INFO_KLASS);
2709 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2710 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2712 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* The unboxed value lives right after the object header */
2715 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2716 MONO_ADD_INS (cfg->cbb, add);
2717 add->type = STACK_MP;
/*
 * handle_alloc:
 * Emit object allocation for KLASS (FOR_BOX distinguishes box allocations).
 * Chooses among several strategies:
 *  - MONO_OPT_SHARED: call mono_object_new (domain, klass);
 *  - AOT out-of-line corlib types: specialized mscorlib helper keyed by
 *    the type token (avoids relocations);
 *  - a GC managed allocator method, when available;
 *  - otherwise the allocation function from mono_class_get_allocation_ftn,
 *    optionally passing the instance size in pointer-words (pass_lw).
 */
2724 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2726 MonoInst *iargs [2];
2729 if (cfg->opt & MONO_OPT_SHARED) {
2730 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2731 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2733 alloc_ftn = mono_object_new;
2734 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2735 /* This happens often in argument checking code, eg. throw new FooException... */
2736 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2737 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2738 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2740 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2741 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2744 if (managed_alloc) {
2745 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2746 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, NULL);
2748 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator wants the instance size in pointer-words */
2750 guint32 lw = vtable->klass->instance_size;
2751 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2752 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2753 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2756 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2760 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 * Like handle_alloc, but the klass/vtable is only known at run time through
 * DATA_INST (shared generic code), so the allocator argument comes from
 * that instruction instead of a compile-time constant.
 * The managed-allocator fast path is currently unavailable here (see the
 * FIXME: the class is open, so its vtable can't be obtained at JIT time).
 */
2764 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2767 MonoInst *iargs [2];
2768 MonoMethod *managed_alloc = NULL;
2772 FIXME: we cannot get managed_alloc here because we can't get
2773 the class's vtable (because it's not a closed class)
2775 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2776 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2779 if (cfg->opt & MONO_OPT_SHARED) {
2780 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2781 iargs [1] = data_inst;
2782 alloc_ftn = mono_object_new;
/* Shared generic code is never AOT compiled in this configuration */
2784 g_assert (!cfg->compile_aot);
2786 if (managed_alloc) {
2787 iargs [0] = data_inst;
2788 return mono_emit_method_call (cfg, managed_alloc,
2789 mono_method_signature (managed_alloc), iargs, NULL);
2792 iargs [0] = data_inst;
2793 alloc_ftn = mono_object_new_specific;
2796 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 * Emit the CIL `box' of VAL as a KLASS instance. Nullable<T> boxes through
 * the corlib Nullable`1.Box method (AOT disabled: method ref can't be
 * encoded); everything else allocates the object and stores the value right
 * after the MonoObject header.
 */
2800 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2802 MonoInst *alloc, *ins;
2804 if (mono_class_is_nullable (klass)) {
2805 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2806 // Can't encode method ref
2807 cfg->disable_aot = TRUE;
2808 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
2811 alloc = handle_alloc (cfg, klass, TRUE);
2813 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 * Box VAL when the vtable comes from DATA_INST (shared generic code).
 * Nullable<T> is explicitly not supported here (asserted).
 */
2819 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, MonoInst *data_inst)
2821 MonoInst *alloc, *ins;
2823 g_assert (!mono_class_is_nullable (klass));
2825 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2827 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 * Emit the CIL `castclass' check of SRC against KLASS:
 *  - null objects pass straight to is_null_bb;
 *  - with --debug=casts, the from/to classes are stashed in the JIT TLS
 *    data so a failing cast can report both sides (reset at the end);
 *  - interfaces go through mini_emit_iface_cast; sealed non-array classes
 *    are checked by direct vtable/klass comparison (fast path); everything
 *    else falls back to mini_emit_castclass.
 * Throws InvalidCastException on mismatch.
 */
2833 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2835 MonoBasicBlock *is_null_bb;
2836 int obj_reg = src->dreg;
2837 int vtable_reg = alloc_preg (cfg);
2839 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference trivially satisfies any castclass */
2841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2842 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2844 if (mini_get_debug_options ()->better_cast_details) {
2845 int to_klass_reg = alloc_preg (cfg);
2846 int klass_reg = alloc_preg (cfg);
2847 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): message string ends ".\n." — the trailing period after the
 * newline looks like a typo, but changing it alters runtime output. */
2850 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2854 MONO_ADD_INS (cfg->cbb, tls_get);
2855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2859 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2863 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2865 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2867 int klass_reg = alloc_preg (cfg);
2869 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class fast path: a single pointer compare suffices */
2871 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2872 /* the remoting code is broken, access the class for now */
2874 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2877 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2880 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2882 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2883 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2887 MONO_START_BB (cfg, is_null_bb);
2889 /* Reset the variables holding the cast details */
2890 if (mini_get_debug_options ()->better_cast_details) {
2891 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2893 MONO_ADD_INS (cfg->cbb, tls_get);
2894 /* It is enough to reset the from field */
2895 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 * Emit the CIL `isinst' test of SRC against KLASS. Produces a value in
 * res_reg: the original object if it is an instance of KLASS (or null),
 * 0 otherwise. Control flow funnels into three blocks:
 *  - is_null_bb: success — copy obj_reg to res_reg;
 *  - false_bb:   failure — res_reg = 0;
 *  - end_bb:     join point.
 * Interfaces, arrays (with special cases for enum/object cast_class and
 * SZARRAY vector checks), nullables and sealed classes each take a
 * dedicated path.
 */
2902 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2905 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2906 int obj_reg = src->dreg;
2907 int vtable_reg = alloc_preg (cfg);
2908 int res_reg = alloc_preg (cfg);
2910 NEW_BBLOCK (cfg, is_null_bb);
2911 NEW_BBLOCK (cfg, false_bb);
2912 NEW_BBLOCK (cfg, end_bb);
/* null input: isinst yields the (null) object itself */
2914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2917 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2919 /* the is_null_bb target simply copies the input register to the output */
2920 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2922 int klass_reg = alloc_preg (cfg);
2924 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: ranks must match, then the element (cast) classes are compared */
2927 int rank_reg = alloc_preg (cfg);
2928 int eclass_reg = alloc_preg (cfg);
2930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2931 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2933 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum[]-style element classes whose parent is Enum's parent */
2935 if (klass->cast_class == mono_defaults.object_class) {
2936 int parent_reg = alloc_preg (cfg);
2937 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2938 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2939 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2940 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2941 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2942 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2943 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2945 } else if (klass->cast_class == mono_defaults.enum_class) {
2946 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2948 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2949 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2951 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2952 /* Check that the object is a vector too */
2953 int bounds_reg = alloc_preg (cfg);
2954 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2959 /* the is_null_bb target simply copies the input register to the output */
2960 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2962 } else if (mono_class_is_nullable (klass)) {
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2964 /* the is_null_bb target simply copies the input register to the output */
2965 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path: single vtable (or klass) pointer compare */
2967 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2968 /* the remoting code is broken, access the class for now */
2970 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2976 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2980 /* the is_null_bb target simply copies the input register to the output */
2981 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
2986 MONO_START_BB (cfg, false_bb);
2988 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2991 MONO_START_BB (cfg, is_null_bb);
2993 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2994 ins->type = STACK_OBJ;
2997 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Remoting-aware `isinst' (used for transparent proxies). See the comment
 * below for the 0/1/2 result encoding; the result is materialized as an
 * OP_ICONST-typed instruction in dreg.
 */
3003 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3005 /* This opcode takes as input an object reference and a class, and returns:
3006 0) if the object is an instance of the class,
3007 1) if the object is not an instance of the class,
3008 2) if the object is a proxy whose type cannot be determined */
3011 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3012 int obj_reg = src->dreg;
3013 int dreg = alloc_ireg (cfg);
3015 int klass_reg = alloc_preg (cfg);
3017 NEW_BBLOCK (cfg, true_bb);
3018 NEW_BBLOCK (cfg, false_bb);
3019 NEW_BBLOCK (cfg, false2_bb);
3020 NEW_BBLOCK (cfg, end_bb);
3021 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
3023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3026 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3027 NEW_BBLOCK (cfg, interface_fail_bb);
3029 tmp_reg = alloc_preg (cfg);
3030 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3031 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: maybe it is a transparent proxy with unknown type */
3032 MONO_START_BB (cfg, interface_fail_bb);
3033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3035 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3037 tmp_reg = alloc_preg (cfg);
3038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3040 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: distinguish proxies from ordinary objects */
3042 tmp_reg = alloc_preg (cfg);
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3046 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: test against the remote class's proxy_class */
3047 tmp_reg = alloc_preg (cfg);
3048 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3051 tmp_reg = alloc_preg (cfg);
3052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3053 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3056 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3059 MONO_START_BB (cfg, no_proxy_bb);
3061 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0 */
3064 MONO_START_BB (cfg, false_bb);
3066 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3067 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3069 MONO_START_BB (cfg, false2_bb);
3071 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3074 MONO_START_BB (cfg, true_bb);
3076 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3078 MONO_START_BB (cfg, end_bb);
/* Materialize dreg as an I4-typed instruction for the evaluation stack */
3081 MONO_INST_NEW (cfg, ins, OP_ICONST);
3083 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Remoting-aware `castclass' (for transparent proxies). See the comment
 * below for the 0/1 result encoding; throws InvalidCastException when the
 * object is neither an instance nor an undeterminable proxy.
 */
3089 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3091 /* This opcode takes as input an object reference and a class, and returns:
3092 0) if the object is an instance of the class,
3093 1) if the object is a proxy whose type cannot be determined
3094 an InvalidCastException exception is thrown otherwise*/
3097 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3098 int obj_reg = src->dreg;
3099 int dreg = alloc_ireg (cfg);
3100 int tmp_reg = alloc_preg (cfg);
3101 int klass_reg = alloc_preg (cfg);
3103 NEW_BBLOCK (cfg, end_bb);
3104 NEW_BBLOCK (cfg, ok_result_bb);
/* null passes any castclass (result 0) */
3106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3109 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3110 NEW_BBLOCK (cfg, interface_fail_bb);
3112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3113 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface test failed: only a proxy with unknown type may proceed (result 1) */
3114 MONO_START_BB (cfg, interface_fail_bb);
3115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3117 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3119 tmp_reg = alloc_preg (cfg);
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3122 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3124 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: distinguish proxies from ordinary objects */
3128 NEW_BBLOCK (cfg, no_proxy_bb);
3130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3132 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3134 tmp_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3138 tmp_reg = alloc_preg (cfg);
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3143 NEW_BBLOCK (cfg, fail_1_bb);
3145 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3147 MONO_START_BB (cfg, fail_1_bb);
3149 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3152 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing castclass throws inside mini_emit_castclass */
3154 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3157 MONO_START_BB (cfg, ok_result_bb);
3159 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3161 MONO_START_BB (cfg, end_bb);
/* Materialize dreg as an I4-typed instruction for the evaluation stack */
3164 MONO_INST_NEW (cfg, ins, OP_ICONST);
3166 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor: allocate the delegate object of
 * type KLASS and fill its fields — target (null store optimized away),
 * method, an optional per-domain method_code slot (JIT-only, non-dynamic
 * methods; lets the delegate trampoline skip a code lookup), and
 * invoke_impl (the delegate trampoline). Runtime checks normally done by
 * mono_delegate_ctor are deferred to the trampoline. Returns the new
 * delegate object instruction.
 */
3171 static G_GNUC_UNUSED MonoInst*
3172 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3174 gpointer *trampoline;
3175 MonoInst *obj, *method_ins, *tramp_ins;
3179 obj = handle_alloc (cfg, klass, FALSE);
3181 /* Inline the contents of mono_delegate_ctor */
3183 /* Set target field */
3184 /* Optimize away setting of NULL target */
3185 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3186 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3188 /* Set method field */
3189 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3190 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3193 * To avoid looking up the compiled code belonging to the target method
3194 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3195 * store it, and we fill it after the method has been compiled.
3197 if (!cfg->compile_aot && !method->dynamic) {
3198 MonoInst *code_slot_ins;
/* method_code_hash maps MonoMethod* -> per-domain code slot; created lazily
 * under the domain lock */
3200 domain = mono_domain_get ();
3201 mono_domain_lock (domain);
3202 if (!domain->method_code_hash)
3203 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3204 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3206 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3207 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3209 mono_domain_unlock (domain);
3211 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3212 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3215 /* Set invoke_impl field */
3216 trampoline = mono_create_delegate_trampoline (klass);
3217 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3218 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3220 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a multi-dimensional `newarr'/newobj array allocation of RANK
 * dimensions. The lengths/bounds are on the stack in SP; the call goes
 * through the vararg array-new icall wrapper, so MONO_CFG_HAS_VARARGS is
 * set on the cfg.
 */
3226 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3228 MonoJitICallInfo *info;
3230 /* Need to register the icall so it gets an icall wrapper */
3231 info = mono_get_array_new_va_icall (rank);
3233 cfg->flags |= MONO_CFG_HAS_VARARGS;
3235 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3236 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block (prepended before any existing code), exactly once
 * (guarded by got_var_allocated). Also adds a dummy use in the exit block
 * so liveness analysis keeps the variable alive across the whole method.
 */
3240 mono_emit_load_got_addr (MonoCompile *cfg)
3242 MonoInst *getaddr, *dummy_use;
3244 if (!cfg->got_var || cfg->got_var_allocated)
3247 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3248 getaddr->dreg = cfg->got_var->dreg;
3250 /* Add it to the start of the first bblock */
3251 if (cfg->bb_entry->code) {
3252 getaddr->next = cfg->bb_entry->code;
3253 cfg->bb_entry->code = getaddr;
3256 MONO_ADD_INS (cfg->bb_entry, getaddr);
3258 cfg->got_var_allocated = TRUE;
3261 * Add a dummy use to keep the got_var alive, since real uses might
3262 * only be generated by the back ends.
3263 * Add it to end_bblock, so the variable's lifetime covers the whole
3265 * It would be better to make the usage of the got var explicit in all
3266 * cases when the backend needs it (i.e. calls, throw etc.), so this
3267 * wouldn't be needed.
3269 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3270 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* True if the CIL byte at *ip is any stloc form (stloc.0-3 or stloc.s) —
 * used to spot store-after-call patterns during inlining decisions. */
3273 #define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: generic-sharing contexts; runtime/icall/noinline/synchronized/
 * pinvoke implementations; MarshalByRef classes; methods with no header or
 * with exception clauses; bodies over the size limit (MONO_INLINELIMIT env
 * var overrides INLINE_LENGTH_LIMIT); classes whose cctor cannot be run
 * now; methods with declarative security (CAS); and, on soft-float targets,
 * any R4 parameter or return.
 */
3276 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3278 MonoMethodHeader *header = mono_method_get_header (method);
3280 #ifdef MONO_ARCH_SOFT_FLOAT
3281 MonoMethodSignature *sig = mono_method_signature (method);
3285 if (cfg->generic_sharing_context)
3288 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch reads `signature->ret' while the soft-float
 * block declares `sig' — confirm `signature' is declared on a line not
 * visible here. */
3289 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3290 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3291 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3295 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3296 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3297 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3298 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3299 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3300 (method->klass->marshalbyref) ||
3301 !header || header->num_clauses)
3304 /* also consider num_locals? */
3305 /* Do the size check early to avoid creating vtables */
3306 if (getenv ("MONO_INLINELIMIT")) {
3307 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3310 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3314 * if we can initialize the class of the method right away, we do,
3315 * otherwise we don't allow inlining if the class needs initialization,
3316 * since it would mean inserting a call to mono_runtime_class_init()
3317 * inside the inlined code
3319 if (!(cfg->opt & MONO_OPT_SHARED)) {
3320 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3321 if (cfg->run_cctors && method->klass->has_cctor) {
3322 if (!method->klass->runtime_info)
3323 /* No vtable created yet */
3325 vtable = mono_class_vtable (cfg->domain, method->klass);
3328 /* This makes so that inline cannot trigger */
3329 /* .cctors: too many apps depend on them */
3330 /* running with a specific order... */
3331 if (! vtable->initialized)
3333 mono_runtime_class_init (vtable);
3335 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3336 if (!method->klass->runtime_info)
3337 /* No vtable created yet */
3339 vtable = mono_class_vtable (cfg->domain, method->klass);
3342 if (!vtable->initialized)
3347 * If we're compiling for shared code
3348 * the cctor will need to be run at aot method load time, for example,
3349 * or at the end of the compilation of the inlining method.
3351 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3356 * CAS - do not inline methods with declarative security
3357 * Note: this has to be before any possible return TRUE;
3359 if (mono_method_has_declsec (method))
3362 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 returns/params would need emulation, so refuse to inline */
3364 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3366 for (i = 0; i < sig->param_count; ++i)
3367 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access in METHOD must trigger the class
 * constructor of VTABLE's class. Already-initialized vtables (non-AOT),
 * BeforeFieldInit classes, and classes with no cctor to run don't need it;
 * neither do instance methods of the class itself, since construction
 * already ran the cctor.
 */
3375 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3377 if (vtable->initialized && !cfg->compile_aot)
3380 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3383 if (!mono_class_needs_cctor_run (vtable->klass, method))
3386 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3387 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit the address computation for a one-dimensional `ldelema' on array
 * ARR at INDEX: bounds-check, then &arr->vector [index * element_size].
 * On 64-bit the 32-bit index is sign-extended first. On x86/amd64, power-
 * of-two element sizes (1/2/4/8) use a single LEA; otherwise a mul+add
 * sequence is emitted. Returns the STACK_PTR address instruction.
 */
3394 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3398 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3400 mono_class_init (klass);
3401 size = mono_class_array_element_size (klass);
3403 mult_reg = alloc_preg (cfg);
3404 array_reg = arr->dreg;
3405 index_reg = index->dreg;
3407 #if SIZEOF_VOID_P == 8
3408 /* The array reg is 64 bits but the index reg is only 32 */
3409 index2_reg = alloc_preg (cfg);
3410 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3412 index2_reg = index_reg;
3415 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3417 #if defined(__i386__) || defined(__x86_64__)
/* LEA fast path: scale must be a power of two (fast_log2 maps size->shift) */
3418 if (size == 1 || size == 2 || size == 4 || size == 8) {
3419 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3421 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3422 ins->type = STACK_PTR;
3428 add_reg = alloc_preg (cfg);
/* Generic path: addr = array + index * size + offsetof (MonoArray, vector) */
3430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3432 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3433 ins->type = STACK_PTR;
3434 MONO_ADD_INS (cfg->cbb, ins);
3439 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [index1, index2] of a rank-2
 * array, range-checking both indexes against the MonoArrayBounds pairs.
 * Only compiled when the arch has real mul/div (depends on OP_LMUL).
 * (Some source lines are elided in this view.)
 */
3441 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3443 int bounds_reg = alloc_preg (cfg);
3444 int add_reg = alloc_preg (cfg);
3445 int mult_reg = alloc_preg (cfg);
3446 int mult2_reg = alloc_preg (cfg);
3447 int low1_reg = alloc_preg (cfg);
3448 int low2_reg = alloc_preg (cfg);
3449 int high1_reg = alloc_preg (cfg);
3450 int high2_reg = alloc_preg (cfg);
3451 int realidx1_reg = alloc_preg (cfg);
3452 int realidx2_reg = alloc_preg (cfg);
3453 int sum_reg = alloc_preg (cfg);
3458 mono_class_init (klass);
3459 size = mono_class_array_element_size (klass);
3461 index1 = index_ins1->dreg;
3462 index2 = index_ins2->dreg;
3464 /* range checking */
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3466 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; throw if realidx1 >= length. */
3468 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3469 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3470 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3472 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3473 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3474 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds entry lives sizeof (MonoArrayBounds) further in. */
3476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3477 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3478 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3480 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3481 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3482 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim1_length + realidx2) * elem_size) + vector offset. */
3484 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3487 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3488 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3490 ins->type = STACK_MP;
3492 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch element-address computation for an Array Get/Set/Address
 * helper call: fast inline paths for rank 1 (and rank 2 when OP_LMUL is
 * available), otherwise fall back to a marshalling wrapper that computes
 * the address out of line.  (Some source lines are elided in this view.)
 */
3499 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3503 MonoMethod *addr_method;
/* For a setter the trailing value argument is not an index. */
3506 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3509 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3511 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3512 /* emit_ldelema_2 depends on OP_LMUL */
3513 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3514 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/size. */
3518 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3519 addr_method = mono_marshal_get_array_address (rank, element_size);
3520 addr = mono_emit_method_call (cfg, addr_method, addr_method->signature, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Intrinsic expansion: if CMETHOD is one of a known set of corlib methods
 * (String, Object, Array, RuntimeHelpers, Thread, Interlocked, Math, ...),
 * emit equivalent inline IR and return it; otherwise defer to the
 * arch-specific hook.  (Some source lines are elided in this view —
 * several closing braces/returns between branches are not visible.)
 */
3526 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3528 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3530 static MonoClass *runtime_helpers_class = NULL;
3531 if (! runtime_helpers_class)
3532 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3533 "System.Runtime.CompilerServices", "RuntimeHelpers");
3535 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds-checked 16-bit load from the chars array. */
3536 if (strcmp (cmethod->name, "get_Chars") == 0) {
3537 int dreg = alloc_ireg (cfg);
3538 int index_reg = alloc_preg (cfg);
3539 int mult_reg = alloc_preg (cfg);
3540 int add_reg = alloc_preg (cfg);
3542 #if SIZEOF_VOID_P == 8
3543 /* The array reg is 64 bits but the index reg is only 32 */
3544 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3546 index_reg = args [1]->dreg;
3548 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3550 #if defined(__i386__) || defined(__x86_64__)
3551 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3552 add_reg = ins->dreg;
3553 /* Avoid a warning */
3555 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3559 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3560 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3561 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3563 type_from_op (ins, NULL, NULL);
/* String.get_Length: direct load of the length field. */
3565 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3566 int dreg = alloc_ireg (cfg);
3567 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3568 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3569 type_from_op (ins, NULL, NULL);
/* String.InternalSetChar: unchecked 16-bit store (callers verify bounds). */
3572 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3573 int mult_reg = alloc_preg (cfg);
3574 int add_reg = alloc_preg (cfg);
3576 /* The corlib functions check for oob already. */
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3578 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3579 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3582 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: obj->vtable->type. */
3584 if (strcmp (cmethod->name, "GetType") == 0) {
3585 int dreg = alloc_preg (cfg);
3586 int vt_reg = alloc_preg (cfg);
3587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3588 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3589 type_from_op (ins, NULL, NULL);
/* Hash from the object address; invalid with a moving GC, hence the guard. */
3592 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3593 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3594 int dreg = alloc_ireg (cfg);
3595 int t1 = alloc_ireg (cfg);
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3598 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3599 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
3603 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3604 MONO_INST_NEW (cfg, ins, OP_NOP);
3605 MONO_ADD_INS (cfg->cbb, ins);
3609 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap reject: both handled methods start with 'g'. */
3610 if (cmethod->name [0] != 'g')
/* Array.get_Rank: obj->vtable->rank (byte). */
3613 if (strcmp (cmethod->name, "get_Rank") == 0) {
3614 int dreg = alloc_ireg (cfg);
3615 int vtable_reg = alloc_preg (cfg);
3616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3617 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3619 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3620 type_from_op (ins, NULL, NULL);
3623 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3624 int dreg = alloc_ireg (cfg);
3626 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3627 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3628 type_from_op (ins, NULL, NULL);
3633 } else if (cmethod->klass == runtime_helpers_class) {
/* RuntimeHelpers.OffsetToStringData is a compile-time constant. */
3635 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3636 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3640 } else if (cmethod->klass == mono_defaults.thread_class) {
3641 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3642 ins->dreg = alloc_preg (cfg);
3643 ins->type = STACK_OBJ;
3644 MONO_ADD_INS (cfg->cbb, ins);
3646 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3647 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3648 MONO_ADD_INS (cfg->cbb, ins);
/* Array.GetGenericValueImpl: load the element in place of the icall. */
3651 } else if (mini_class_is_system_array (cmethod->klass) &&
3652 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3653 MonoInst *addr, *store, *load;
3654 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3656 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3657 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3660 } else if (cmethod->klass->image == mono_defaults.corlib &&
3661 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3662 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3665 #if SIZEOF_VOID_P == 8
3666 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3667 /* 64 bit reads are already atomic */
3668 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3669 ins->dreg = mono_alloc_preg (cfg);
3670 ins->inst_basereg = args [0]->dreg;
3671 ins->inst_offset = 0;
3672 MONO_ADD_INS (cfg->cbb, ins);
3676 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Interlocked.Increment: atomic add of constant +1. */
3677 if (strcmp (cmethod->name, "Increment") == 0) {
3678 MonoInst *ins_iconst;
3681 if (fsig->params [0]->type == MONO_TYPE_I4)
3682 opcode = OP_ATOMIC_ADD_NEW_I4;
3683 #if SIZEOF_VOID_P == 8
3684 else if (fsig->params [0]->type == MONO_TYPE_I8)
3685 opcode = OP_ATOMIC_ADD_NEW_I8;
3688 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3689 ins_iconst->inst_c0 = 1;
3690 ins_iconst->dreg = mono_alloc_ireg (cfg);
3691 MONO_ADD_INS (cfg->cbb, ins_iconst);
3693 MONO_INST_NEW (cfg, ins, opcode);
3694 ins->dreg = mono_alloc_ireg (cfg);
3695 ins->inst_basereg = args [0]->dreg;
3696 ins->inst_offset = 0;
3697 ins->sreg2 = ins_iconst->dreg;
3698 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3699 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Decrement: atomic add of constant -1. */
3701 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3702 MonoInst *ins_iconst;
3705 if (fsig->params [0]->type == MONO_TYPE_I4)
3706 opcode = OP_ATOMIC_ADD_NEW_I4;
3707 #if SIZEOF_VOID_P == 8
3708 else if (fsig->params [0]->type == MONO_TYPE_I8)
3709 opcode = OP_ATOMIC_ADD_NEW_I8;
3712 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3713 ins_iconst->inst_c0 = -1;
3714 ins_iconst->dreg = mono_alloc_ireg (cfg);
3715 MONO_ADD_INS (cfg->cbb, ins_iconst);
3717 MONO_INST_NEW (cfg, ins, opcode);
3718 ins->dreg = mono_alloc_ireg (cfg);
3719 ins->inst_basereg = args [0]->dreg;
3720 ins->inst_offset = 0;
3721 ins->sreg2 = ins_iconst->dreg;
3722 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3723 MONO_ADD_INS (cfg->cbb, ins);
3725 } else if (strcmp (cmethod->name, "Add") == 0) {
3728 if (fsig->params [0]->type == MONO_TYPE_I4)
3729 opcode = OP_ATOMIC_ADD_NEW_I4;
3730 #if SIZEOF_VOID_P == 8
3731 else if (fsig->params [0]->type == MONO_TYPE_I8)
3732 opcode = OP_ATOMIC_ADD_NEW_I8;
3736 MONO_INST_NEW (cfg, ins, opcode);
3737 ins->dreg = mono_alloc_ireg (cfg);
3738 ins->inst_basereg = args [0]->dreg;
3739 ins->inst_offset = 0;
3740 ins->sreg2 = args [1]->dreg;
/* NOTE(review): opcode was set to OP_ATOMIC_ADD_NEW_I4/I8 above, but this
 * compares against OP_ATOMIC_ADD_I4 — so the I4 case appears to get
 * STACK_I8.  The Increment/Decrement branches compare against
 * OP_ATOMIC_ADD_NEW_I4; this likely should too.  Confirm and fix. */
3741 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3742 MONO_ADD_INS (cfg->cbb, ins);
3745 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3747 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Interlocked.Exchange for I4/I8/I/object, depending on word size. */
3748 if (strcmp (cmethod->name, "Exchange") == 0) {
3751 if (fsig->params [0]->type == MONO_TYPE_I4)
3752 opcode = OP_ATOMIC_EXCHANGE_I4;
3753 #if SIZEOF_VOID_P == 8
3754 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3755 (fsig->params [0]->type == MONO_TYPE_I) ||
3756 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3757 opcode = OP_ATOMIC_EXCHANGE_I8;
3759 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3760 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3761 opcode = OP_ATOMIC_EXCHANGE_I4;
3766 MONO_INST_NEW (cfg, ins, opcode);
3767 ins->dreg = mono_alloc_ireg (cfg);
3768 ins->inst_basereg = args [0]->dreg;
3769 ins->inst_offset = 0;
3770 ins->sreg2 = args [1]->dreg;
3771 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type follows the first parameter's CIL type. */
3773 switch (fsig->params [0]->type) {
3775 ins->type = STACK_I4;
3779 ins->type = STACK_I8;
3781 case MONO_TYPE_OBJECT:
3782 ins->type = STACK_OBJ;
3785 g_assert_not_reached ();
3788 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3790 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3792 * Can't implement CompareExchange methods this way since they have
3793 * three arguments. We can implement one of the common cases, where the new
3794 * value is a constant.
3796 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3797 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3798 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3799 ins->dreg = alloc_ireg (cfg);
3800 ins->sreg1 = args [0]->dreg;
3801 ins->sreg2 = args [1]->dreg;
3802 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3803 ins->type = STACK_I4;
3804 MONO_ADD_INS (cfg->cbb, ins);
3806 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3808 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3812 } else if (cmethod->klass->image == mono_defaults.corlib) {
/* Debugger.Break -> OP_BREAK (breakpoint trap). */
3813 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3814 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3815 MONO_INST_NEW (cfg, ins, OP_BREAK);
3816 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows folds to a build-time constant. */
3819 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3820 && strcmp (cmethod->klass->name, "Environment") == 0) {
3821 #ifdef PLATFORM_WIN32
3822 EMIT_NEW_ICONST (cfg, ins, 1);
3824 EMIT_NEW_ICONST (cfg, ins, 0);
3828 } else if (cmethod->klass == mono_defaults.math_class) {
3830 * There is general branches code for Min/Max, but it does not work for
3832 * http://everything2.com/?node_id=1051618
/* Give the backend a chance to expand anything else. */
3836 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3840 * This entry point could be used later for arbitrary method
3840 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect selected runtime-internal calls to faster managed equivalents.
 * Currently: String.InternalAllocateStr -> the GC's managed allocator,
 * when one is available.  (Some source lines are elided in this view.)
 */
3843 inline static MonoInst*
3844 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3845 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3847 if (method->klass == mono_defaults.string_class) {
3848 /* managed string allocation support */
3849 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3850 MonoInst *iargs [2];
3851 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3852 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (vtable, length) in place of the icall. */
3855 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3856 iargs [1] = args [0];
3857 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, this);
/*
 * mono_save_args:
 * Spill each inline-call argument (including the implicit "this") into a
 * freshly created local so the inlined body reads stable variables rather
 * than the caller's evaluation-stack MonoInsts.
 * (Some source lines are elided in this view.)
 */
3864 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp, MonoInst **args)
3866 MonoInst *store, *temp;
3869 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is "this" when present; its type comes from the stack entry. */
3870 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3873 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3874 * would be different than the MonoInst's used to represent arguments, and
3875 * the ldelema implementation can't deal with that.
3876 * Solution: When ldelema is used on an inline argument, create a var for
3877 * it, emit ldelema on that var, and emit the saving code below in
3878 * inline_method () if needed.
3880 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3882 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, *sp);
3883 store->cil_code = sp [0]->cil_code;
/* Debugging aids: restrict which methods participate in inlining via the
 * MONO_INLINE_CALLED/CALLER_METHOD_NAME_LIMIT environment variables. */
3888 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3889 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3891 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/* Cached prefix read once from the environment; "" means "allow all". */
3893 mono_inline_called_method_name_limit = NULL;
/*
 * check_inline_called_method_name_limit:
 * TRUE when CALLED_METHOD's full name starts with the configured prefix.
 * (Some source lines are elided in this view.)
 */
3894 static gboolean check_inline_called_method_name_limit (MonoMethod *called_method) {
3895 char *called_method_name = mono_method_full_name (called_method, TRUE);
3898 if (mono_inline_called_method_name_limit == NULL) {
/* getenv storage is not freed/copied: retained for the process lifetime. */
3899 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3900 if (limit_string != NULL) {
3901 mono_inline_called_method_name_limit = limit_string;
3903 mono_inline_called_method_name_limit = (char *) "";
3907 strncmp_result = strncmp (called_method_name, mono_inline_called_method_name_limit, strlen (mono_inline_called_method_name_limit));
3908 g_free (called_method_name);
3910 //return (strncmp_result <= 0);
3911 return (strncmp_result == 0);
3915 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/* Cached prefix read once from the environment; "" means "allow all". */
3917 mono_inline_caller_method_name_limit = NULL;
/*
 * check_inline_caller_method_name_limit:
 * TRUE when CALLER_METHOD's full name starts with the configured prefix.
 * Mirror of check_inline_called_method_name_limit for the caller side.
 * (Some source lines are elided in this view.)
 */
3918 static gboolean check_inline_caller_method_name_limit (MonoMethod *caller_method) {
3919 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3922 if (mono_inline_caller_method_name_limit == NULL) {
3923 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3924 if (limit_string != NULL) {
3925 mono_inline_caller_method_name_limit = limit_string;
3927 mono_inline_caller_method_name_limit = (char *) "";
3931 strncmp_result = strncmp (caller_method_name, mono_inline_caller_method_name_limit, strlen (mono_inline_caller_method_name_limit));
3932 g_free (caller_method_name);
3934 //return (strncmp_result <= 0);
3935 return (strncmp_result == 0);
/*
 * inline_method:
 * Try to inline CMETHOD at the current emission point.  Saves the parts
 * of CFG that mono_method_to_ir2 will clobber, compiles the callee into
 * fresh start/end bblocks, and on success stitches those blocks into the
 * caller's graph; on failure (costs too high or exception) restores CFG
 * and discards the new blocks.  "inline_allways" (sic — identifier kept
 * as-is) forces acceptance regardless of cost.
 * (Some source lines are elided in this view.)
 */
3940 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3941 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3943 MonoInst *ins, *rvar = NULL;
3944 MonoMethodHeader *cheader;
3945 MonoBasicBlock *ebblock, *sbblock;
3947 MonoMethod *prev_inlined_method;
/* Saved CFG state, restored after the recursive IR generation. */
3948 MonoInst **prev_locals, **prev_args;
3949 MonoType **prev_arg_types;
3950 guint prev_real_offset;
3951 GHashTable *prev_cbb_hash;
3952 MonoBasicBlock **prev_cil_offset_to_bb;
3953 MonoBasicBlock *prev_cbb;
3954 unsigned char* prev_cil_start;
3955 guint32 prev_cil_offset_to_bb_len;
3956 MonoMethod *prev_current_method;
3957 MonoGenericContext *prev_generic_context;
3959 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
3961 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3962 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3965 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3966 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3970 if (cfg->verbose_level > 2)
3971 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3973 if (!cmethod->inline_info) {
3974 mono_jit_stats.inlineable_methods++;
3975 cmethod->inline_info = 1;
3977 /* allocate space to store the return value */
3978 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3979 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3982 /* allocate local variables */
3983 cheader = mono_method_get_header (cmethod);
3984 prev_locals = cfg->locals;
3985 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3986 for (i = 0; i < cheader->num_locals; ++i)
3987 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3989 /* allocate start and end blocks */
3990 /* This is needed so if the inline is aborted, we can clean up */
3991 NEW_BBLOCK (cfg, sbblock);
3992 sbblock->real_offset = real_offset;
3994 NEW_BBLOCK (cfg, ebblock);
3995 ebblock->block_num = cfg->num_bblocks++;
3996 ebblock->real_offset = real_offset;
3998 prev_args = cfg->args;
3999 prev_arg_types = cfg->arg_types;
4000 prev_inlined_method = cfg->inlined_method;
4001 cfg->inlined_method = cmethod;
4002 cfg->ret_var_set = FALSE;
4003 prev_real_offset = cfg->real_offset;
4004 prev_cbb_hash = cfg->cbb_hash;
4005 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4006 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4007 prev_cil_start = cfg->cil_start;
4008 prev_cbb = cfg->cbb;
4009 prev_current_method = cfg->current_method;
4010 prev_generic_context = cfg->generic_context;
/* Recursively convert the callee; negative cost means the inline aborted. */
4012 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4014 cfg->inlined_method = prev_inlined_method;
4015 cfg->real_offset = prev_real_offset;
4016 cfg->cbb_hash = prev_cbb_hash;
4017 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4018 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4019 cfg->cil_start = prev_cil_start;
4020 cfg->locals = prev_locals;
4021 cfg->args = prev_args;
4022 cfg->arg_types = prev_arg_types;
4023 cfg->current_method = prev_current_method;
4024 cfg->generic_context = prev_generic_context;
/* Accept when cheap enough (cost < 60) or when forced. */
4026 if ((costs >= 0 && costs < 60) || inline_allways) {
4027 if (cfg->verbose_level > 2)
4028 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4030 mono_jit_stats.inlined_methods++;
4032 /* always add some code to avoid block split failures */
4033 MONO_INST_NEW (cfg, ins, OP_NOP);
4034 MONO_ADD_INS (prev_cbb, ins);
4036 prev_cbb->next_bb = sbblock;
4037 link_bblock (cfg, prev_cbb, sbblock);
4040 * Get rid of the begin and end bblocks if possible to aid local
4043 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4045 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4046 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4048 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4049 MonoBasicBlock *prev = ebblock->in_bb [0];
4050 mono_merge_basic_blocks (cfg, prev, ebblock);
4058 * If the inlined method contains only a throw, then the ret var is not
4059 * set, so set it to a dummy value.
4061 if (!cfg->ret_var_set) {
4062 static double r8_0 = 0.0;
4064 switch (rvar->type) {
4066 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4069 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4074 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4077 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4078 ins->type = STACK_R8;
4079 ins->inst_p0 = (void*)&r8_0;
4080 ins->dreg = rvar->dreg;
4081 MONO_ADD_INS (cfg->cbb, ins);
4084 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4087 g_assert_not_reached ();
/* Push the return value (if any) back onto the caller's stack. */
4091 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: clear the exception state and drop the new bblocks. */
4096 if (cfg->verbose_level > 2)
4097 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4098 cfg->exception_type = MONO_EXCEPTION_NONE;
4099 mono_loader_clear_error ();
4101 /* This gets rid of the newly added bblocks */
4102 cfg->cbb = prev_cbb;
4108 * Some of these comments may well be out-of-date.
4109 * Design decisions: we do a single pass over the IL code (and we do bblock
4110 * splitting/merging in the few cases when it's required: a back jump to an IL
4111 * address that was not already seen as bblock starting point).
4112 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4113 * Complex operations are decomposed in simpler ones right away. We need to let the
4114 * arch-specific code peek and poke inside this process somehow (except when the
4115 * optimizations can take advantage of the full semantic info of coarse opcodes).
4116 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4117 * MonoInst->opcode initially is the IL opcode or some simplification of that
4118 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4119 * opcode with value bigger than OP_LAST.
4120 * At this point the IR can be handed over to an interpreter, a dumb code generator
4121 * or to the optimizing code generator that will translate it to SSA form.
4123 * Profiling directed optimizations.
4124 * We may compile by default with few or no optimizations and instrument the code
4125 * or the user may indicate what methods to optimize the most either in a config file
4126 * or through repeated runs where the compiler applies offline the optimizations to
4127 * each method and then decides if it was worth it.
/* IL-verification helpers used throughout mono_method_to_ir: each bails
 * out via UNVERIFIED (or load_error) when the bytecode is malformed. */
4130 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4131 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4132 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4133 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4134 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4135 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4136 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4137 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4139 /* offset from br.s -> br like opcodes */
4140 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * TRUE when the IL address IP either starts no basic block or starts BB
 * itself — i.e. IP still belongs to BB during linear emission.
 */
4143 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4145 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4147 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * Pre-pass over the IL stream [start, end): create a basic block at every
 * branch target and fall-through point (via GET_BBLOCK), and mark blocks
 * ending in CEE_THROW as out-of-line (cold).
 * (Some source lines are elided in this view.)
 */
4151 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4153 unsigned char *ip = start;
4154 unsigned char *target;
4157 MonoBasicBlock *bblock;
4158 const MonoOpcode *opcode;
4161 cli_addr = ip - start;
4162 i = mono_opcode_value ((const guint8 **)&ip, end);
4165 opcode = &mono_opcodes [i];
/* Advance by operand size; branches additionally register their targets. */
4166 switch (opcode->argument) {
4167 case MonoInlineNone:
4170 case MonoInlineString:
4171 case MonoInlineType:
4172 case MonoInlineField:
4173 case MonoInlineMethod:
4176 case MonoShortInlineR:
4183 case MonoShortInlineVar:
4184 case MonoShortInlineI:
4187 case MonoShortInlineBrTarget:
/* 2 = opcode byte + 1-byte signed displacement. */
4188 target = start + cli_addr + 2 + (signed char)ip [1];
4189 GET_BBLOCK (cfg, bblock, target);
4192 GET_BBLOCK (cfg, bblock, ip);
4194 case MonoInlineBrTarget:
/* 5 = opcode byte + 4-byte signed displacement. */
4195 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4196 GET_BBLOCK (cfg, bblock, target);
4199 GET_BBLOCK (cfg, bblock, ip);
4201 case MonoInlineSwitch: {
4202 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole instruction. */
4205 cli_addr += 5 + 4 * n;
4206 target = start + cli_addr;
4207 GET_BBLOCK (cfg, bblock, target);
4209 for (j = 0; j < n; ++j) {
4210 target = start + cli_addr + (gint32)read32 (ip);
4211 GET_BBLOCK (cfg, bblock, target);
4221 g_assert_not_reached ();
4224 if (i == CEE_THROW) {
4225 unsigned char *bb_start = ip - 1;
4227 /* Find the start of the bblock containing the throw */
4229 while ((bb_start >= start) && !bblock) {
4230 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-only blocks are moved out of the hot path by later passes. */
4234 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod.  Wrapper methods store their targets in
 * wrapper data rather than metadata; otherwise resolve through the image,
 * allowing open (uninstantiated) generic constructions.
 */
4243 static inline MonoMethod *
4244 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4248 if (m->wrapper_type != MONO_WRAPPER_NONE)
4249 return mono_method_get_wrapper_data (m, token);
4251 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing an open constructed declaring type is rejected (the elided
 * branch presumably nulls/flags the result — confirm against full source).
 */
4256 static inline MonoMethod *
4257 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4259 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4261 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve TOKEN to a MonoClass, honoring wrapper data for wrapper
 * methods, and make sure the class is initialized for the JIT.
 */
4267 static inline MonoClass*
4268 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4272 if (method->wrapper_type != MONO_WRAPPER_NONE)
4273 klass = mono_method_get_wrapper_data (method, token);
4275 klass = mono_class_get_full (method->klass->image, token, context);
4277 mono_class_init (klass);
4282 * Returns TRUE if the JIT should abort inlining because "callee"
4283 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS link demands of CALLEE against CALLER.  ECMA link demands
 * emit inline code that throws a SecurityException before the call;
 * other failures are recorded on the compile for later surfacing.
 * (Some source lines are elided in this view.)
 */
4286 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only relevant when inlining (cfg->method != caller) into secured code. */
4290 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4294 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4295 if (result == MONO_JIT_SECURITY_OK)
4298 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4299 /* Generate code to throw a SecurityException before the actual call/link */
4300 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4303 NEW_ICONST (cfg, args [0], 4);
4304 NEW_METHODCONST (cfg, args [1], caller);
4305 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, mono_method_signature (secman->linkdemandsecurityexception), args, NULL);
4306 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4307 /* don't hide previous results */
4308 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4309 cfg->exception_data = result;
/*
 * method_access_exception:
 * Return (caching on first use) the SecurityManager.MethodAccessException
 * 2-argument thrower method.
 */
4317 method_access_exception (void)
4319 static MonoMethod *method = NULL;
4322 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4323 method = mono_class_get_method_from_name (secman->securitymanager,
4324 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit a call to SecurityManager.MethodAccessException (caller, callee)
 * at the current point, so the violation throws at run time.
 */
4331 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4332 MonoBasicBlock *bblock, unsigned char *ip)
4334 MonoMethod *thrower = method_access_exception ();
4337 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4338 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4339 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), args, NULL);
/*
 * verification_exception:
 * Return (caching on first use) the SecurityManager.VerificationException
 * 0-argument thrower method.
 */
4343 verification_exception (void)
4345 static MonoMethod *method = NULL;
4348 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4349 method = mono_class_get_method_from_name (secman->securitymanager,
4350 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 * Emit a call that raises a VerificationException at run time.
 */
4357 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4359 MonoMethod *thrower = verification_exception ();
4361 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: a transparent caller may not call critical code.
 * Safe when caller level >= callee level or either side is SafeCritical;
 * otherwise emit a MethodAccessException throw at this point.
 * (Some source lines are elided in this view.)
 */
4365 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4366 MonoBasicBlock *bblock, unsigned char *ip)
4368 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4369 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4370 gboolean is_safe = TRUE;
4372 if (!(caller_level >= callee_level ||
4373 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4374 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4379 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 * Test hook: flags a method literally named "unsafeMethod".
 * (Return statements are elided in this view.)
 */
4383 method_is_safe (MonoMethod *method)
4386 if (strcmp (method->name, "unsafeMethod") == 0)
4393 * Check that the IL instructions at ip are the array initialization
4394 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Recognize the "dup; ldtoken <field>; call RuntimeHelpers.InitializeArray"
 * pattern after a newarr and, when the element type and byte order allow a
 * direct copy, return a pointer to the field's RVA data (or the raw RVA
 * for AOT), storing the byte size in *out_size.
 * (Some source lines are elided in this view.)
 */
4397 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4400 * newarr[System.Int32]
4402 * ldtoken field valuetype ...
4403 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (Field table). */
4405 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4406 guint32 token = read32 (ip + 7);
4407 guint32 field_token = read32 (ip + 2);
4408 guint32 field_index = field_token & 0xffffff;
4410 const char *data_ptr;
4412 MonoMethod *cmethod;
4413 MonoClass *dummy_class;
4414 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4420 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Must really be corlib's RuntimeHelpers.InitializeArray. */
4423 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4425 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4426 case MONO_TYPE_BOOLEAN:
4430 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4431 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4432 case MONO_TYPE_CHAR:
4442 return NULL; /* stupid ARM FP swapped format */
/* Guard: computed data size must fit inside the RVA field. */
4452 if (size > mono_type_size (field->type, &dummy_align))
4455 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4456 field_index = read32 (ip + 2) & 0xffffff;
4457 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4458 data_ptr = mono_image_rva_map (method->klass->image, rva);
4459 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4460 /* for aot code we do the lookup on load */
4461 if (aot && data_ptr)
4462 return GUINT_TO_POINTER (rva);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, formatting a message that
 * names METHOD and disassembles the offending instruction at IP (or notes
 * that the method body is empty when code_size is 0).
 */
4469 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4471 char *method_fname = mono_method_full_name (method, TRUE);
4474 if (mono_method_get_header (method)->code_size == 0)
4475 method_code = g_strdup ("method body is empty.")
4477 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4478 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4479 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* g_strdup_printf copied the strings; free the temporaries */
4480 g_free (method_fname);
4481 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-built managed EXCEPTION on CFG.  exception_ptr is
 * registered as a GC root before the assignment so the managed object is
 * kept alive while the compilation failure is propagated.
 */
4485 set_exception_object (MonoCompile *cfg, MonoException *exception)
4487 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4488 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4489 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type.  When compiling shared
 * generic code, type variables are first resolved through the generic
 * sharing context so the answer reflects the shared representation.
 */
4493 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4497 if (cfg->generic_sharing_context)
4498 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4500 type = &klass->byval_arg;
4501 return MONO_TYPE_IS_REFERENCE (type);
/*
 * Lowering pass: rewrites high-level array opcodes (length load, bounds
 * check, newarr) into loads, arch-specific checks, and icalls.  Code is
 * emitted into a scratch bblock and then spliced over the original
 * instruction, so the CFG shape is not changed.
 */
4505 * mono_decompose_array_access_opts:
4507 * Decompose array access opcodes.
4510 mono_decompose_array_access_opts (MonoCompile *cfg)
4512 MonoBasicBlock *bb, *first_bb;
4515 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4516 * can be executed anytime. It should be run before decompose_long
4520 * Create a dummy bblock and emit code into it so we can use the normal
4521 * code generation macros.
4523 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4524 first_bb = cfg->cbb;
4526 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4528 MonoInst *prev = NULL;
4530 MonoInst *iargs [3];
/* skip bblocks with no array-access opcodes (flag set during IR generation) */
4533 if (!bb->has_array_access)
4536 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* reset the scratch bblock before emitting the replacement sequence */
4538 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4544 for (ins = bb->code; ins; ins = ins->next) {
4545 switch (ins->opcode) {
/* array length: plain i4 load from MonoArray::max_length */
4547 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4548 G_STRUCT_OFFSET (MonoArray, max_length));
4549 MONO_ADD_INS (cfg->cbb, dest);
4551 case OP_BOUNDS_CHECK:
4552 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: with MONO_OPT_SHARED go through the generic mono_array_new
 * icall (domain + class + length); otherwise use the faster
 * vtable-specific mono_array_new_specific icall */
4555 if (cfg->opt & MONO_OPT_SHARED) {
4556 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4557 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4558 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4559 iargs [2]->dreg = ins->sreg1;
4561 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4562 dest->dreg = ins->dreg;
4564 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4567 NEW_VTABLECONST (cfg, iargs [0], vtable);
4568 MONO_ADD_INS (cfg->cbb, iargs [0]);
4569 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4570 iargs [1]->dreg = ins->sreg1;
4572 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4573 dest->dreg = ins->dreg;
4580 g_assert (cfg->cbb == first_bb);
4582 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4583 /* Replace the original instruction with the new code sequence */
4585 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* recycle the scratch bblock for the next instruction */
4586 first_bb->code = first_bb->last_ins = NULL;
4587 first_bb->in_count = first_bb->out_count = 0;
4588 cfg->cbb = first_bb;
4595 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4605 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * Soft-float lowering pass (compiled only on soft-float targets).  Doubles
 * are represented as integer vreg pairs (like longs on 32-bit), r4 values
 * are routed through the mono_fload_r4/mono_fstore_r4 icalls, and fp
 * compares/branches become emulation icalls plus integer compares.
 * Generated code is emitted into a scratch bblock and spliced over the
 * original instruction, the same technique as decompose_array_access_opts.
 */
4608 * mono_handle_soft_float:
4610 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4611 * similar to long support on 32 bit platforms. 32 bit float values require special
4612 * handling when used as locals, arguments, and in calls.
4613 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4616 mono_handle_soft_float (MonoCompile *cfg)
4618 MonoBasicBlock *bb, *first_bb;
4621 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4625 * Create a dummy bblock and emit code into it so we can use the normal
4626 * code generation macros.
4628 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4629 first_bb = cfg->cbb;
4631 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4633 MonoInst *prev = NULL;
4636 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* reset the scratch bblock before emitting a replacement sequence */
4638 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4644 for (ins = bb->code; ins; ins = ins->next) {
4645 const char *spec = INS_INFO (ins->opcode);
4647 /* Most fp operations are handled automatically by opcode emulation */
4649 switch (ins->opcode) {
/* r8 constant: reinterpret the double bits as an i8 constant */
4652 d.vald = *(double*)ins->inst_p0;
4653 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4658 /* We load the r8 value */
4659 d.vald = *(float*)ins->inst_p0;
4660 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register move becomes a long (vreg-pair) move */
4664 ins->opcode = OP_LMOVE;
/* NOTE(review): +1/+2 look like the low/high halves of the vreg pair —
 * confirm against the surrounding (elided) opcode cases */
4667 ins->opcode = OP_MOVE;
4668 ins->sreg1 = ins->sreg1 + 1;
4671 ins->opcode = OP_MOVE;
4672 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: set both halves via SETLRET */
4675 int reg = ins->sreg1;
4677 ins->opcode = OP_SETLRET;
4679 ins->sreg1 = reg + 1;
4680 ins->sreg2 = reg + 2;
/* r8 memory accesses become plain i8 accesses (same bit pattern) */
4683 case OP_LOADR8_MEMBASE:
4684 ins->opcode = OP_LOADI8_MEMBASE;
4686 case OP_STORER8_MEMBASE_REG:
4687 ins->opcode = OP_STOREI8_MEMBASE_REG;
4689 case OP_STORER4_MEMBASE_REG: {
4690 MonoInst *iargs [2];
4693 /* Arg 1 is the double value */
4694 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4695 iargs [0]->dreg = ins->sreg1;
4697 /* Arg 2 is the address to store to */
4698 addr_reg = mono_alloc_preg (cfg);
4699 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
/* icall narrows r8 -> r4 and stores to the computed address */
4700 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4704 case OP_LOADR4_MEMBASE: {
4705 MonoInst *iargs [1];
4709 addr_reg = mono_alloc_preg (cfg);
4710 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
/* icall loads the r4 and widens it to r8 */
4711 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4712 conv->dreg = ins->dreg;
4717 case OP_FCALL_MEMBASE: {
4718 MonoCallInst *call = (MonoCallInst*)ins;
4719 if (call->signature->ret->type == MONO_TYPE_R4) {
4720 MonoCallInst *call2;
4721 MonoInst *iargs [1];
4724 /* Convert the call into a call returning an int */
4725 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4726 memcpy (call2, call, sizeof (MonoCallInst));
4727 switch (ins->opcode) {
4729 call2->inst.opcode = OP_CALL;
4732 call2->inst.opcode = OP_CALL_REG;
4734 case OP_FCALL_MEMBASE:
4735 call2->inst.opcode = OP_CALL_MEMBASE;
4738 g_assert_not_reached ();
4740 call2->inst.dreg = mono_alloc_ireg (cfg);
4741 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4743 /* FIXME: Optimize this */
4745 /* Emit an r4->r8 conversion */
4746 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4747 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4748 conv->dreg = ins->dreg;
/* r8-returning calls: retype as LCALL so the result lands in a vreg pair */
4750 switch (ins->opcode) {
4752 ins->opcode = OP_LCALL;
4755 ins->opcode = OP_LCALL_REG;
4757 case OP_FCALL_MEMBASE:
4758 ins->opcode = OP_LCALL_MEMBASE;
4761 g_assert_not_reached ();
4767 MonoJitICallInfo *info;
4768 MonoInst *iargs [2];
4769 MonoInst *call, *cmp, *br;
4771 /* Convert fcompare+fbcc to icall+icompare+beq */
/* look up the emulation icall for the *following* fp branch opcode */
4773 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4776 /* Create dummy MonoInst's for the arguments */
4777 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4778 iargs [0]->dreg = ins->sreg1;
4779 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4780 iargs [1]->dreg = ins->sreg2;
4782 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4784 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4785 cmp->sreg1 = call->dreg;
4787 MONO_ADD_INS (cfg->cbb, cmp);
/* branch on the icall's boolean result, reusing the original targets */
4789 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4790 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4791 br->inst_true_bb = ins->next->inst_true_bb;
4792 br->inst_false_bb = ins->next->inst_false_bb;
4793 MONO_ADD_INS (cfg->cbb, br);
4795 /* The call sequence might include fp ins */
4798 /* Skip fbcc or fccc */
4799 NULLIFY_INS (ins->next);
4807 MonoJitICallInfo *info;
4808 MonoInst *iargs [2];
4811 /* Convert fccc to icall+icompare+iceq */
4813 info = mono_find_jit_opcode_emulation (ins->opcode);
4816 /* Create dummy MonoInst's for the arguments */
4817 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4818 iargs [0]->dreg = ins->sreg1;
4819 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4820 iargs [1]->dreg = ins->sreg2;
4822 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* materialize the condition-code result as a 0/1 integer */
4824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4825 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4827 /* The call sequence might include fp ins */
/* after lowering, no instruction may still use an fp vreg */
4832 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4833 mono_print_ins (ins);
4834 g_assert_not_reached ();
4839 g_assert (cfg->cbb == first_bb);
4841 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4842 /* Replace the original instruction with the new code sequence */
4844 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4845 first_bb->code = first_bb->last_ins = NULL;
4846 first_bb->in_count = first_bb->out_count = 0;
4847 cfg->cbb = first_bb;
4854 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes; decompose them now */
4857 mono_decompose_long_opts (cfg);
4863 * mono_method_to_ir: translates IL into basic blocks containing trees
4866 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4867 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4868 guint inline_offset, gboolean is_virtual_call)
4870 MonoInst *ins, **sp, **stack_start;
4871 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4872 MonoMethod *cmethod, *method_definition;
4873 MonoInst **arg_array;
4874 MonoMethodHeader *header;
4876 guint32 token, ins_flag;
4878 MonoClass *constrained_call = NULL;
4879 unsigned char *ip, *end, *target, *err_pos;
4880 static double r8_0 = 0.0;
4881 MonoMethodSignature *sig;
4882 MonoGenericContext *generic_context = NULL;
4883 MonoGenericContainer *generic_container = NULL;
4884 MonoType **param_types;
4885 GList *bb_recheck = NULL, *tmp;
4886 int i, n, start_new_bblock, dreg;
4887 int num_calls = 0, inline_costs = 0;
4888 int breakpoint_id = 0;
4890 MonoBoolean security, pinvoke;
4891 MonoSecurityManager* secman = NULL;
4892 MonoDeclSecurityActions actions;
4893 GSList *class_inits = NULL;
4894 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4897 /* serialization and xdomain stuff may need access to private fields and methods */
4898 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4899 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4900 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4901 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4902 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4903 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4905 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4907 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4908 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4909 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4910 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4912 image = method->klass->image;
4913 header = mono_method_get_header (method);
4914 generic_container = mono_method_get_generic_container (method);
4915 sig = mono_method_signature (method);
4916 num_args = sig->hasthis + sig->param_count;
4917 ip = (unsigned char*)header->code;
4918 cfg->cil_start = ip;
4919 end = ip + header->code_size;
4920 mono_jit_stats.cil_code_size += header->code_size;
4922 method_definition = method;
4923 while (method_definition->is_inflated) {
4924 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4925 method_definition = imethod->declaring;
4928 /* SkipVerification is not allowed if core-clr is enabled */
4929 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4931 dont_verify_stloc = TRUE;
4934 if (!dont_verify && mini_method_verify (cfg, method_definition))
4935 goto exception_exit;
4937 if (sig->is_inflated)
4938 generic_context = mono_method_get_context (method);
4939 else if (generic_container)
4940 generic_context = &generic_container->context;
4941 cfg->generic_context = generic_context;
4943 if (!cfg->generic_sharing_context)
4944 g_assert (!sig->has_type_parameters);
4946 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4947 g_assert (method->is_inflated);
4948 g_assert (mono_method_get_context (method)->method_inst);
4950 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4951 g_assert (sig->generic_param_count);
4953 if (cfg->method == method) {
4954 cfg->real_offset = 0;
4956 cfg->real_offset = inline_offset;
4959 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4960 cfg->cil_offset_to_bb_len = header->code_size;
4962 cfg->current_method = method;
4964 if (cfg->verbose_level > 2)
4965 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4967 dont_inline = g_list_prepend (dont_inline, method);
4968 if (cfg->method == method) {
4970 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4971 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4974 NEW_BBLOCK (cfg, start_bblock);
4975 cfg->bb_entry = start_bblock;
4976 start_bblock->cil_code = NULL;
4977 start_bblock->cil_length = 0;
4980 NEW_BBLOCK (cfg, end_bblock);
4981 cfg->bb_exit = end_bblock;
4982 end_bblock->cil_code = NULL;
4983 end_bblock->cil_length = 0;
4984 g_assert (cfg->num_bblocks == 2);
4986 arg_array = cfg->args;
4988 if (header->num_clauses) {
4989 cfg->spvars = g_hash_table_new (NULL, NULL);
4990 cfg->exvars = g_hash_table_new (NULL, NULL);
4992 /* handle exception clauses */
4993 for (i = 0; i < header->num_clauses; ++i) {
4994 MonoBasicBlock *try_bb;
4995 MonoExceptionClause *clause = &header->clauses [i];
4996 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4997 try_bb->real_offset = clause->try_offset;
4998 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4999 tblock->real_offset = clause->handler_offset;
5000 tblock->flags |= BB_EXCEPTION_HANDLER;
5002 link_bblock (cfg, try_bb, tblock);
5004 if (*(ip + clause->handler_offset) == CEE_POP)
5005 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5007 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5008 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5009 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5010 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5011 MONO_ADD_INS (tblock, ins);
5013 /* todo: is a fault block unsafe to optimize? */
5014 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5015 tblock->flags |= BB_EXCEPTION_UNSAFE;
5019 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5021 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5023 /* catch and filter blocks get the exception object on the stack */
5024 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5025 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5026 MonoInst *dummy_use;
5028 /* mostly like handle_stack_args (), but just sets the input args */
5029 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5030 tblock->in_scount = 1;
5031 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5032 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5035 * Add a dummy use for the exvar so its liveness info will be
5039 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5041 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5042 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5043 tblock->real_offset = clause->data.filter_offset;
5044 tblock->in_scount = 1;
5045 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5046 /* The filter block shares the exvar with the handler block */
5047 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5048 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5049 MONO_ADD_INS (tblock, ins);
5053 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5054 clause->data.catch_class &&
5055 cfg->generic_sharing_context &&
5056 mono_class_check_context_used (clause->data.catch_class)) {
5057 if (mono_method_get_context (method)->method_inst)
5058 GENERIC_SHARING_FAILURE (CEE_NOP);
5061 * In shared generic code with catch
5062 * clauses containing type variables
5063 * the exception handling code has to
5064 * be able to get to the rgctx.
5065 * Therefore we have to make sure that
5066 * the vtable/mrgctx argument (for
5067 * static or generic methods) or the
5068 * "this" argument (for non-static
5069 * methods) are live.
5071 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5072 mini_method_get_context (method)->method_inst) {
5073 mono_get_vtable_var (cfg);
5075 MonoInst *dummy_use;
5077 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5082 arg_array = alloca (sizeof (MonoInst *) * num_args);
5083 cfg->cbb = start_bblock;
5084 mono_save_args (cfg, sig, inline_args, arg_array);
5085 cfg->args = arg_array;
5088 /* FIRST CODE BLOCK */
5089 NEW_BBLOCK (cfg, bblock);
5090 bblock->cil_code = ip;
5094 ADD_BBLOCK (cfg, bblock);
5096 if (cfg->method == method) {
5097 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5098 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5099 MONO_INST_NEW (cfg, ins, OP_BREAK);
5100 MONO_ADD_INS (bblock, ins);
5104 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5105 secman = mono_security_manager_get_methods ();
5107 security = (secman && mono_method_has_declsec (method));
5108 /* at this point having security doesn't mean we have any code to generate */
5109 if (security && (cfg->method == method)) {
5110 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5111 * And we do not want to enter the next section (with allocation) if we
5112 * have nothing to generate */
5113 security = mono_declsec_get_demands (method, &actions);
5116 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5117 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5119 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5120 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5121 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5123 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5124 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5128 mono_custom_attrs_free (custom);
5131 custom = mono_custom_attrs_from_class (wrapped->klass);
5132 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5136 mono_custom_attrs_free (custom);
5139 /* not a P/Invoke after all */
5144 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5145 /* we use a separate basic block for the initialization code */
5146 NEW_BBLOCK (cfg, init_localsbb);
5147 cfg->bb_init = init_localsbb;
5148 init_localsbb->real_offset = cfg->real_offset;
5149 start_bblock->next_bb = init_localsbb;
5150 init_localsbb->next_bb = bblock;
5151 link_bblock (cfg, start_bblock, init_localsbb);
5152 link_bblock (cfg, init_localsbb, bblock);
5154 cfg->cbb = init_localsbb;
5156 start_bblock->next_bb = bblock;
5157 link_bblock (cfg, start_bblock, bblock);
5160 /* at this point we know, if security is TRUE, that some code needs to be generated */
5161 if (security && (cfg->method == method)) {
5164 mono_jit_stats.cas_demand_generation++;
5166 if (actions.demand.blob) {
5167 /* Add code for SecurityAction.Demand */
5168 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5169 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5170 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5171 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5173 if (actions.noncasdemand.blob) {
5174 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5175 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5176 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5177 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5178 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5179 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5181 if (actions.demandchoice.blob) {
5182 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5183 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5184 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5185 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5186 mono_emit_method_call (cfg, secman->demandchoice, mono_method_signature (secman->demandchoice), args, NULL);
5190 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5192 mono_emit_method_call (cfg, secman->demandunmanaged, mono_method_signature (secman->demandunmanaged), NULL, NULL);
5195 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5196 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5197 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5198 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5199 if (!(method->klass && method->klass->image &&
5200 mono_security_core_clr_is_platform_image (method->klass->image))) {
5201 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5205 if (!method_is_safe (method))
5206 emit_throw_verification_exception (cfg, bblock, ip);
5209 if (header->code_size == 0)
5212 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5217 if (cfg->method == method)
5218 mono_debug_init_method (cfg, bblock, breakpoint_id);
5220 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5222 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5223 for (n = 0; n < sig->param_count; ++n)
5224 param_types [n + sig->hasthis] = sig->params [n];
5225 cfg->arg_types = param_types;
5226 for (n = 0; n < header->num_locals; ++n) {
5227 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5232 /* add a check for this != NULL to inlined methods */
5233 if (is_virtual_call) {
5236 NEW_ARGLOAD (cfg, arg_ins, 0);
5237 MONO_ADD_INS (cfg->cbb, arg_ins);
5238 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5239 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5240 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5243 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5244 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5247 start_new_bblock = 0;
5251 if (cfg->method == method)
5252 cfg->real_offset = ip - header->code;
5254 cfg->real_offset = inline_offset;
5259 if (start_new_bblock) {
5260 bblock->cil_length = ip - bblock->cil_code;
5261 if (start_new_bblock == 2) {
5262 g_assert (ip == tblock->cil_code);
5264 GET_BBLOCK (cfg, tblock, ip);
5266 bblock->next_bb = tblock;
5269 start_new_bblock = 0;
5270 for (i = 0; i < bblock->in_scount; ++i) {
5271 if (cfg->verbose_level > 3)
5272 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5273 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5277 g_slist_free (class_inits);
5280 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5281 link_bblock (cfg, bblock, tblock);
5282 if (sp != stack_start) {
5283 handle_stack_args (cfg, stack_start, sp - stack_start);
5285 CHECK_UNVERIFIABLE (cfg);
5287 bblock->next_bb = tblock;
5290 for (i = 0; i < bblock->in_scount; ++i) {
5291 if (cfg->verbose_level > 3)
5292 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5293 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5296 g_slist_free (class_inits);
5301 bblock->real_offset = cfg->real_offset;
5303 if ((cfg->method == method) && cfg->coverage_info) {
5304 guint32 cil_offset = ip - header->code;
5305 cfg->coverage_info->data [cil_offset].cil_code = ip;
5307 /* TODO: Use an increment here */
5308 #if defined(__i386__)
5309 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5310 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5312 MONO_ADD_INS (cfg->cbb, ins);
5314 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5315 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5319 if (cfg->verbose_level > 3)
5320 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5325 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5327 MONO_ADD_INS (bblock, ins);
5333 CHECK_STACK_OVF (1);
5334 n = (*ip)-CEE_LDARG_0;
5336 EMIT_NEW_ARGLOAD (cfg, ins, n);
5344 CHECK_STACK_OVF (1);
5345 n = (*ip)-CEE_LDLOC_0;
5347 EMIT_NEW_LOCLOAD (cfg, ins, n);
5358 n = (*ip)-CEE_STLOC_0;
5361 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5364 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5365 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5366 /* Optimize reg-reg moves away */
5368 * Can't optimize other opcodes, since sp[0] might point to
5369 * the last ins of a decomposed opcode.
5371 sp [0]->dreg = (cfg)->locals [n]->dreg;
5373 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5381 CHECK_STACK_OVF (1);
5384 EMIT_NEW_ARGLOAD (cfg, ins, n);
5390 CHECK_STACK_OVF (1);
5393 NEW_ARGLOADA (cfg, ins, n);
5394 MONO_ADD_INS (cfg->cbb, ins);
5404 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5406 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5411 CHECK_STACK_OVF (1);
5414 EMIT_NEW_LOCLOAD (cfg, ins, n);
5420 CHECK_STACK_OVF (1);
5421 CHECK_LOCAL (ip [1]);
5424 * ldloca inhibits many optimizations so try to get rid of it in common
5427 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5428 gboolean skip = FALSE;
5430 /* From the INITOBJ case */
5431 token = read32 (ip + 4);
5432 klass = mini_get_class (method, token, generic_context);
5433 CHECK_TYPELOAD (klass);
5434 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
5435 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
5437 if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5438 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5439 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5440 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5452 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5460 CHECK_LOCAL (ip [1]);
5461 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5463 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5468 CHECK_STACK_OVF (1);
5469 EMIT_NEW_PCONST (cfg, ins, NULL);
5470 ins->type = STACK_OBJ;
5475 CHECK_STACK_OVF (1);
5476 EMIT_NEW_ICONST (cfg, ins, -1);
5489 CHECK_STACK_OVF (1);
5490 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5496 CHECK_STACK_OVF (1);
5498 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5504 CHECK_STACK_OVF (1);
5505 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5511 CHECK_STACK_OVF (1);
5512 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5513 ins->type = STACK_I8;
5514 ins->dreg = alloc_dreg (cfg, STACK_I8);
5516 ins->inst_l = (gint64)read64 (ip);
5517 MONO_ADD_INS (bblock, ins);
5523 /* FIXME: we should really allocate this only late in the compilation process */
5524 mono_domain_lock (cfg->domain);
5525 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5526 mono_domain_unlock (cfg->domain);
5528 CHECK_STACK_OVF (1);
5529 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5530 ins->type = STACK_R8;
5531 ins->dreg = alloc_dreg (cfg, STACK_R8);
5535 MONO_ADD_INS (bblock, ins);
5543 /* FIXME: we should really allocate this only late in the compilation process */
5544 mono_domain_lock (cfg->domain);
5545 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5546 mono_domain_unlock (cfg->domain);
5548 CHECK_STACK_OVF (1);
5549 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5550 ins->type = STACK_R8;
5551 ins->dreg = alloc_dreg (cfg, STACK_R8);
5555 MONO_ADD_INS (bblock, ins);
5562 MonoInst *temp, *store;
5564 CHECK_STACK_OVF (1);
5568 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5569 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5571 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5574 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5587 if (sp [0]->type == STACK_R8)
5588 /* we need to pop the value from the x86 FP stack */
5589 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5596 if (stack_start != sp)
5598 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5599 ins = (MonoInst*)call;
5600 token = read32 (ip + 1);
5601 /* FIXME: check the signature matches */
5602 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5607 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5608 GENERIC_SHARING_FAILURE (CEE_JMP);
5610 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5611 if (check_linkdemand (cfg, method, cmethod))
5613 CHECK_CFG_EXCEPTION;
5616 ins->inst_p0 = cmethod;
5617 MONO_ADD_INS (bblock, ins);
5619 start_new_bblock = 1;
5622 cfg->disable_aot = 1;
5627 case CEE_CALLVIRT: {
5628 MonoInst *addr = NULL;
5629 MonoMethodSignature *fsig = NULL;
5631 int virtual = *ip == CEE_CALLVIRT;
5632 int calli = *ip == CEE_CALLI;
5633 gboolean pass_imt_from_rgctx = FALSE;
5634 MonoInst *imt_arg = NULL;
5635 gboolean pass_vtable = FALSE;
5636 gboolean pass_mrgctx = FALSE;
5637 MonoInst *vtable_arg = NULL;
5638 gboolean check_this = FALSE;
5641 token = read32 (ip + 1);
5648 if (method->wrapper_type != MONO_WRAPPER_NONE)
5649 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5651 fsig = mono_metadata_parse_signature (image, token);
5653 n = fsig->param_count + fsig->hasthis;
5655 MonoMethod *cil_method;
5657 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5658 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5659 cil_method = cmethod;
5660 } else if (constrained_call) {
5661 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5663 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5664 cil_method = cmethod;
5669 if (!dont_verify && !cfg->skip_visibility) {
5670 MonoMethod *target_method = cil_method;
5671 if (method->is_inflated) {
5672 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5674 if (!mono_method_can_access_method (method_definition, target_method) &&
5675 !mono_method_can_access_method (method, cil_method))
5676 METHOD_ACCESS_FAILURE;
5679 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5680 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5682 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5683 /* MS.NET seems to silently convert this to a callvirt */
5686 if (!cmethod->klass->inited)
5687 if (!mono_class_init (cmethod->klass))
5690 if (mono_method_signature (cmethod)->pinvoke) {
5691 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
5692 fsig = mono_method_signature (wrapper);
5693 } else if (constrained_call) {
5694 fsig = mono_method_signature (cmethod);
5696 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5699 mono_save_token_info (cfg, image, token, cmethod);
5701 n = fsig->param_count + fsig->hasthis;
5703 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5704 if (check_linkdemand (cfg, method, cmethod))
5706 CHECK_CFG_EXCEPTION;
5709 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5710 mini_class_is_system_array (cmethod->klass)) {
5711 array_rank = cmethod->klass->rank;
5714 if (cmethod->string_ctor)
5715 g_assert_not_reached ();
5718 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5721 if (!cfg->generic_sharing_context && cmethod)
5722 g_assert (!mono_method_check_context_used (cmethod));
5726 //g_assert (!virtual || fsig->hasthis);
5730 if (constrained_call) {
5732 * We have the `constrained.' prefix opcode.
5734 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5738 * The type parameter is instantiated as a valuetype,
5739 * but that type doesn't override the method we're
5740 * calling, so we need to box `this'.
5742 dreg = alloc_dreg (cfg, STACK_VTYPE);
5743 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5744 ins->klass = constrained_call;
5745 sp [0] = handle_box (cfg, ins, constrained_call);
5746 } else if (!constrained_call->valuetype) {
5747 int dreg = alloc_preg (cfg);
5750 * The type parameter is instantiated as a reference
5751 * type. We have a managed pointer on the stack, so
5752 * we need to dereference it here.
5754 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5755 ins->type = STACK_OBJ;
5757 } else if (cmethod->klass->valuetype)
5759 constrained_call = NULL;
5762 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5766 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5767 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5768 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5769 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5770 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5773 * Pass vtable iff target method might
5774 * be shared, which means that sharing
5775 * is enabled for its class and its
5776 * context is sharable (and it's not a
5779 if (sharing_enabled && context_sharable &&
5780 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5784 if (cmethod && mini_method_get_context (cmethod) &&
5785 mini_method_get_context (cmethod)->method_inst) {
5786 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5787 MonoGenericContext *context = mini_method_get_context (cmethod);
5788 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5790 g_assert (!pass_vtable);
5792 if (sharing_enabled && context_sharable)
5796 if (cfg->generic_sharing_context && cmethod) {
5797 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5799 context_used = mono_method_check_context_used (cmethod);
5801 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5802 /* Generic method interface
5803 calls are resolved via a
5804 helper function and don't
5806 if (!cmethod_context || !cmethod_context->method_inst)
5807 pass_imt_from_rgctx = TRUE;
5811 * If a shared method calls another
5812 * shared method then the caller must
5813 * have a generic sharing context
5814 * because the magic trampoline
5815 * requires it. FIXME: We shouldn't
5816 * have to force the vtable/mrgctx
5817 * variable here. Instead there
5818 * should be a flag in the cfg to
5819 * request a generic sharing context.
5821 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5822 mono_get_vtable_var (cfg);
5827 GENERIC_SHARING_FAILURE (*ip);
5833 EMIT_GET_RGCTX (rgctx, context_used);
5834 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5836 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5838 CHECK_TYPELOAD (cmethod->klass);
5839 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5844 g_assert (!vtable_arg);
5849 EMIT_GET_RGCTX (rgctx, context_used);
5850 vtable_arg = emit_get_rgctx_method_rgctx (cfg, context_used, rgctx, cmethod);
5852 MonoMethodRuntimeGenericContext *mrgctx;
5854 mrgctx = mono_method_lookup_rgctx (mono_class_vtable (cfg->domain, cmethod->klass),
5855 mini_method_get_context (cmethod)->method_inst);
5857 EMIT_NEW_PCONST (cfg, vtable_arg, mrgctx);
5860 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5861 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5868 if (pass_imt_from_rgctx) {
5871 g_assert (!pass_vtable);
5874 EMIT_GET_RGCTX (rgctx, context_used);
5875 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5876 MONO_RGCTX_INFO_METHOD);
5882 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5883 check->sreg1 = sp [0]->dreg;
5884 MONO_ADD_INS (cfg->cbb, check);
5887 /* Calling virtual generic methods */
5888 if (cmethod && virtual &&
5889 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5890 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5891 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5892 mono_method_signature (cmethod)->generic_param_count) {
5893 MonoInst *this_temp, *this_arg_temp, *store;
5894 MonoInst *iargs [4];
5896 g_assert (mono_method_signature (cmethod)->is_inflated);
5898 /* Prevent inlining of methods that contain indirect calls */
5901 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5902 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5903 MONO_ADD_INS (bblock, store);
5905 /* FIXME: This should be a managed pointer */
5906 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5908 /* Because of the PCONST below */
5909 cfg->disable_aot = TRUE;
5910 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5914 EMIT_GET_RGCTX (rgctx, context_used);
5915 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5916 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5917 addr = mono_emit_jit_icall (cfg,
5918 mono_helper_compile_generic_method_wo_context, iargs);
5920 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5921 EMIT_NEW_PCONST (cfg, iargs [2], mono_method_get_context (cmethod));
5922 EMIT_NEW_TEMPLOADA (cfg, iargs [3], this_arg_temp->inst_c0);
5923 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5926 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5928 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5929 if (!MONO_TYPE_IS_VOID (fsig->ret))
5938 /* FIXME: runtime generic context pointer for jumps? */
5939 /* FIXME: handle this for generic sharing eventually */
5940 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5941 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5944 /* FIXME: runtime generic context pointer for jumps? */
5945 GENERIC_SHARING_FAILURE (*ip);
5947 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5950 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5951 call->tail_call = TRUE;
5952 call->method = cmethod;
5953 call->signature = mono_method_signature (cmethod);
5956 /* Handle tail calls similarly to calls */
5957 call->inst.opcode = OP_TAILCALL;
5959 mono_arch_emit_call (cfg, call);
5962 * We implement tail calls by storing the actual arguments into the
5963 * argument variables, then emitting a CEE_JMP.
5965 for (i = 0; i < n; ++i) {
5966 /* Prevent argument from being register allocated */
5967 arg_array [i]->flags |= MONO_INST_VOLATILE;
5968 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5973 cfg->disable_aot = 1;
5975 ins = (MonoInst*)call;
5976 ins->inst_p0 = cmethod;
5977 ins->inst_p1 = arg_array [0];
5978 MONO_ADD_INS (bblock, ins);
5979 link_bblock (cfg, bblock, end_bblock);
5980 start_new_bblock = 1;
5981 /* skip CEE_RET as well */
5987 /* Conversion to a JIT intrinsic */
5988 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5989 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5990 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6001 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6002 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6003 mono_method_check_inlining (cfg, cmethod) &&
6004 !g_list_find (dont_inline, cmethod)) {
6006 gboolean allways = FALSE;
6008 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6009 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6010 /* Prevent inlining of methods that call wrappers */
6012 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
6016 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6018 cfg->real_offset += 5;
6021 if (!MONO_TYPE_IS_VOID (fsig->ret))
6022 /* *sp is already set by inline_method */
6025 inline_costs += costs;
6031 inline_costs += 10 * num_calls++;
6033 /* Tail recursion elimination */
6034 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6035 gboolean has_vtargs = FALSE;
6038 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6041 /* keep it simple */
6042 for (i = fsig->param_count - 1; i >= 0; i--) {
6043 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6048 for (i = 0; i < n; ++i)
6049 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6050 MONO_INST_NEW (cfg, ins, OP_BR);
6051 MONO_ADD_INS (bblock, ins);
6052 tblock = start_bblock->out_bb [0];
6053 link_bblock (cfg, bblock, tblock);
6054 ins->inst_target_bb = tblock;
6055 start_new_bblock = 1;
6057 /* skip the CEE_RET, too */
6058 if (ip_in_bb (cfg, bblock, ip + 5))
6068 /* Generic sharing */
6069 /* FIXME: only do this for generic methods if
6070 they are not shared! */
6072 (cmethod->klass->valuetype ||
6073 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6074 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6075 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6076 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6077 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6078 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6083 g_assert (cfg->generic_sharing_context && cmethod);
6087 * We are compiling a call to a
6088 * generic method from shared code,
6089 * which means that we have to look up
6090 * the method in the rgctx and do an
6094 EMIT_GET_RGCTX (rgctx, context_used);
6095 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6098 /* Indirect calls */
6100 g_assert (!imt_arg);
6102 if (*ip == CEE_CALL)
6103 g_assert (context_used);
6104 else if (*ip == CEE_CALLI)
6105 g_assert (!vtable_arg);
6107 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6108 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6110 /* Prevent inlining of methods with indirect calls */
6114 #ifdef MONO_ARCH_RGCTX_REG
6116 int rgctx_reg = mono_alloc_preg (cfg);
6118 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6119 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6120 call = (MonoCallInst*)ins;
6121 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6123 GENERIC_SHARING_FAILURE (*ip);
6126 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6128 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6129 if (fsig->pinvoke && !fsig->ret->byref) {
6133 * Native code might return non register sized integers
6134 * without initializing the upper bits.
6136 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6137 case OP_LOADI1_MEMBASE:
6138 widen_op = OP_ICONV_TO_I1;
6140 case OP_LOADU1_MEMBASE:
6141 widen_op = OP_ICONV_TO_U1;
6143 case OP_LOADI2_MEMBASE:
6144 widen_op = OP_ICONV_TO_I2;
6146 case OP_LOADU2_MEMBASE:
6147 widen_op = OP_ICONV_TO_U2;
6153 if (widen_op != -1) {
6154 int dreg = alloc_preg (cfg);
6157 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6158 widen->type = ins->type;
6175 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6176 if (sp [fsig->param_count]->type == STACK_OBJ) {
6177 MonoInst *iargs [2];
6180 iargs [1] = sp [fsig->param_count];
6182 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6185 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6186 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6187 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6188 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6190 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6193 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6194 if (!cmethod->klass->element_class->valuetype && !readonly)
6195 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6198 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6201 g_assert_not_reached ();
6209 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6211 if (!MONO_TYPE_IS_VOID (fsig->ret))
6222 #ifdef MONO_ARCH_RGCTX_REG
6224 int rgctx_reg = mono_alloc_preg (cfg);
6226 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6227 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6228 call = (MonoCallInst*)ins;
6229 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6231 GENERIC_SHARING_FAILURE (*ip);
6233 } else if (imt_arg) {
6234 ins = (MonoInst*)mono_emit_imt_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6236 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6239 if (!MONO_TYPE_IS_VOID (fsig->ret))
6247 if (cfg->method != method) {
6248 /* return from inlined method */
6250 * If in_count == 0, that means the ret is unreachable due to
6251 * being preceded by a throw. In that case, inline_method () will
6252 * handle setting the return value
6253 * (test case: test_0_inline_throw ()).
6255 if (return_var && cfg->cbb->in_count) {
6259 //g_assert (returnvar != -1);
6260 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6261 cfg->ret_var_set = TRUE;
6265 MonoType *ret_type = mono_method_signature (method)->ret;
6267 g_assert (!return_var);
6270 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6273 if (!cfg->vret_addr) {
6276 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6278 EMIT_NEW_RETLOADA (cfg, ret_addr);
6280 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6281 ins->klass = mono_class_from_mono_type (ret_type);
6284 #ifdef MONO_ARCH_SOFT_FLOAT
6285 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6286 MonoInst *iargs [1];
6290 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6291 mono_arch_emit_setret (cfg, method, conv);
6293 mono_arch_emit_setret (cfg, method, *sp);
6296 mono_arch_emit_setret (cfg, method, *sp);
6301 if (sp != stack_start)
6303 MONO_INST_NEW (cfg, ins, OP_BR);
6305 ins->inst_target_bb = end_bblock;
6306 MONO_ADD_INS (bblock, ins);
6307 link_bblock (cfg, bblock, end_bblock);
6308 start_new_bblock = 1;
6312 MONO_INST_NEW (cfg, ins, OP_BR);
6314 target = ip + 1 + (signed char)(*ip);
6316 GET_BBLOCK (cfg, tblock, target);
6317 link_bblock (cfg, bblock, tblock);
6318 CHECK_BBLOCK (target, ip, tblock);
6319 ins->inst_target_bb = tblock;
6320 if (sp != stack_start) {
6321 handle_stack_args (cfg, stack_start, sp - stack_start);
6323 CHECK_UNVERIFIABLE (cfg);
6325 MONO_ADD_INS (bblock, ins);
6326 start_new_bblock = 1;
6327 inline_costs += BRANCH_COST;
6341 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6343 target = ip + 1 + *(signed char*)ip;
6349 inline_costs += BRANCH_COST;
6353 MONO_INST_NEW (cfg, ins, OP_BR);
6356 target = ip + 4 + (gint32)read32(ip);
6358 GET_BBLOCK (cfg, tblock, target);
6359 link_bblock (cfg, bblock, tblock);
6360 CHECK_BBLOCK (target, ip, tblock);
6361 ins->inst_target_bb = tblock;
6362 if (sp != stack_start) {
6363 handle_stack_args (cfg, stack_start, sp - stack_start);
6365 CHECK_UNVERIFIABLE (cfg);
6368 MONO_ADD_INS (bblock, ins);
6370 start_new_bblock = 1;
6371 inline_costs += BRANCH_COST;
6378 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6379 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6380 guint32 opsize = is_short ? 1 : 4;
6382 CHECK_OPSIZE (opsize);
6384 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6387 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6392 GET_BBLOCK (cfg, tblock, target);
6393 link_bblock (cfg, bblock, tblock);
6394 CHECK_BBLOCK (target, ip, tblock);
6395 GET_BBLOCK (cfg, tblock, ip);
6396 link_bblock (cfg, bblock, tblock);
6398 if (sp != stack_start) {
6399 handle_stack_args (cfg, stack_start, sp - stack_start);
6400 CHECK_UNVERIFIABLE (cfg);
6403 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6404 cmp->sreg1 = sp [0]->dreg;
6405 type_from_op (cmp, sp [0], NULL);
6408 #if SIZEOF_VOID_P == 4
6409 if (cmp->opcode == OP_LCOMPARE_IMM) {
6410 /* Convert it to OP_LCOMPARE */
6411 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6412 ins->type = STACK_I8;
6413 ins->dreg = alloc_dreg (cfg, STACK_I8);
6415 MONO_ADD_INS (bblock, ins);
6416 cmp->opcode = OP_LCOMPARE;
6417 cmp->sreg2 = ins->dreg;
6420 MONO_ADD_INS (bblock, cmp);
6422 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6423 type_from_op (ins, sp [0], NULL);
6424 MONO_ADD_INS (bblock, ins);
6425 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6426 GET_BBLOCK (cfg, tblock, target);
6427 ins->inst_true_bb = tblock;
6428 GET_BBLOCK (cfg, tblock, ip);
6429 ins->inst_false_bb = tblock;
6430 start_new_bblock = 2;
6433 inline_costs += BRANCH_COST;
6448 MONO_INST_NEW (cfg, ins, *ip);
6450 target = ip + 4 + (gint32)read32(ip);
6456 inline_costs += BRANCH_COST;
6460 MonoBasicBlock **targets;
6461 MonoBasicBlock *default_bblock;
6462 MonoJumpInfoBBTable *table;
6464 int offset_reg = alloc_preg (cfg);
6465 int target_reg = alloc_preg (cfg);
6466 int table_reg = alloc_preg (cfg);
6467 int sum_reg = alloc_preg (cfg);
6472 n = read32 (ip + 1);
6475 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6479 CHECK_OPSIZE (n * sizeof (guint32));
6480 target = ip + n * sizeof (guint32);
6482 GET_BBLOCK (cfg, default_bblock, target);
6484 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6485 for (i = 0; i < n; ++i) {
6486 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6487 targets [i] = tblock;
6491 if (sp != stack_start) {
6493 * Link the current bb with the targets as well, so handle_stack_args
6494 * will set their in_stack correctly.
6496 link_bblock (cfg, bblock, default_bblock);
6497 for (i = 0; i < n; ++i)
6498 link_bblock (cfg, bblock, targets [i]);
6500 handle_stack_args (cfg, stack_start, sp - stack_start);
6502 CHECK_UNVERIFIABLE (cfg);
6505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6509 for (i = 0; i < n; ++i)
6510 link_bblock (cfg, bblock, targets [i]);
6512 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6513 table->table = targets;
6514 table->table_size = n;
6517 /* ARM implements SWITCH statements differently */
6518 /* FIXME: Make it use the generic implementation */
6519 /* the backend code will deal with aot vs normal case */
6520 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6521 ins->sreg1 = src1->dreg;
6522 ins->inst_p0 = table;
6523 ins->inst_many_bb = targets;
6524 ins->klass = GUINT_TO_POINTER (n);
6525 MONO_ADD_INS (cfg->cbb, ins);
6527 if (sizeof (gpointer) == 8)
6528 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6532 #if SIZEOF_VOID_P == 8
6533 /* The upper word might not be zero, and we add it to a 64 bit address later */
6534 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6537 if (cfg->compile_aot) {
6538 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6540 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6541 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6542 ins->inst_p0 = table;
6543 ins->dreg = table_reg;
6544 MONO_ADD_INS (cfg->cbb, ins);
6547 /* FIXME: Use load_memindex */
6548 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6550 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6552 start_new_bblock = 1;
6553 inline_costs += (BRANCH_COST * 2);
6573 dreg = alloc_freg (cfg);
6576 dreg = alloc_lreg (cfg);
6579 dreg = alloc_preg (cfg);
6582 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6583 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6584 ins->flags |= ins_flag;
6586 MONO_ADD_INS (bblock, ins);
6601 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6602 ins->flags |= ins_flag;
6604 MONO_ADD_INS (bblock, ins);
6612 MONO_INST_NEW (cfg, ins, (*ip));
6614 ins->sreg1 = sp [0]->dreg;
6615 ins->sreg2 = sp [1]->dreg;
6616 type_from_op (ins, sp [0], sp [1]);
6618 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6620 /* Use the immediate opcodes if possible */
6621 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6622 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6623 if (imm_opcode != -1) {
6624 ins->opcode = imm_opcode;
6625 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6628 sp [1]->opcode = OP_NOP;
6632 MONO_ADD_INS ((cfg)->cbb, (ins));
6635 mono_decompose_opcode (cfg, ins);
6652 MONO_INST_NEW (cfg, ins, (*ip));
6654 ins->sreg1 = sp [0]->dreg;
6655 ins->sreg2 = sp [1]->dreg;
6656 type_from_op (ins, sp [0], sp [1]);
6658 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6659 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6661 /* FIXME: Pass opcode to is_inst_imm */
6663 /* Use the immediate opcodes if possible */
6664 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6667 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6668 if (imm_opcode != -1) {
6669 ins->opcode = imm_opcode;
6670 if (sp [1]->opcode == OP_I8CONST) {
6671 #if SIZEOF_VOID_P == 8
6672 ins->inst_imm = sp [1]->inst_l;
6674 ins->inst_ls_word = sp [1]->inst_ls_word;
6675 ins->inst_ms_word = sp [1]->inst_ms_word;
6679 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6682 sp [1]->opcode = OP_NOP;
6685 MONO_ADD_INS ((cfg)->cbb, (ins));
6688 mono_decompose_opcode (cfg, ins);
6701 case CEE_CONV_OVF_I8:
6702 case CEE_CONV_OVF_U8:
6706 /* Special case this earlier so we have long constants in the IR */
6707 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6708 int data = sp [-1]->inst_c0;
6709 sp [-1]->opcode = OP_I8CONST;
6710 sp [-1]->type = STACK_I8;
6711 #if SIZEOF_VOID_P == 8
6712 if ((*ip) == CEE_CONV_U8)
6713 sp [-1]->inst_c0 = (guint32)data;
6715 sp [-1]->inst_c0 = data;
6717 sp [-1]->inst_ls_word = data;
6718 if ((*ip) == CEE_CONV_U8)
6719 sp [-1]->inst_ms_word = 0;
6721 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6723 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6730 case CEE_CONV_OVF_I4:
6731 case CEE_CONV_OVF_I1:
6732 case CEE_CONV_OVF_I2:
6733 case CEE_CONV_OVF_I:
6734 case CEE_CONV_OVF_U:
6737 if (sp [-1]->type == STACK_R8) {
6738 ADD_UNOP (CEE_CONV_OVF_I8);
6745 case CEE_CONV_OVF_U1:
6746 case CEE_CONV_OVF_U2:
6747 case CEE_CONV_OVF_U4:
6750 if (sp [-1]->type == STACK_R8) {
6751 ADD_UNOP (CEE_CONV_OVF_U8);
6758 case CEE_CONV_OVF_I1_UN:
6759 case CEE_CONV_OVF_I2_UN:
6760 case CEE_CONV_OVF_I4_UN:
6761 case CEE_CONV_OVF_I8_UN:
6762 case CEE_CONV_OVF_U1_UN:
6763 case CEE_CONV_OVF_U2_UN:
6764 case CEE_CONV_OVF_U4_UN:
6765 case CEE_CONV_OVF_U8_UN:
6766 case CEE_CONV_OVF_I_UN:
6767 case CEE_CONV_OVF_U_UN:
6777 case CEE_ADD_OVF_UN:
6779 case CEE_MUL_OVF_UN:
6781 case CEE_SUB_OVF_UN:
6789 token = read32 (ip + 1);
6790 klass = mini_get_class (method, token, generic_context);
6791 CHECK_TYPELOAD (klass);
6793 if (generic_class_is_reference_type (cfg, klass)) {
6794 MonoInst *store, *load;
6795 int dreg = alloc_preg (cfg);
6797 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6798 load->flags |= ins_flag;
6799 MONO_ADD_INS (cfg->cbb, load);
6801 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6802 store->flags |= ins_flag;
6803 MONO_ADD_INS (cfg->cbb, store);
6805 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6817 token = read32 (ip + 1);
6818 klass = mini_get_class (method, token, generic_context);
6819 CHECK_TYPELOAD (klass);
6821 /* Optimize the common ldobj+stloc combination */
6831 loc_index = ip [5] - CEE_STLOC_0;
6838 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6839 CHECK_LOCAL (loc_index);
6841 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6842 ins->dreg = cfg->locals [loc_index]->dreg;
6848 /* Optimize the ldobj+stobj combination */
6849 /* The reference case ends up being a load+store anyway */
6850 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6855 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6862 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6871 CHECK_STACK_OVF (1);
6873 n = read32 (ip + 1);
6875 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6876 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6877 ins->type = STACK_OBJ;
6880 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6881 MonoInst *iargs [1];
6883 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6884 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6886 if (cfg->opt & MONO_OPT_SHARED) {
6887 MonoInst *iargs [3];
6889 if (cfg->compile_aot) {
6890 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6892 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6893 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6894 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6895 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6896 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6898 if (bblock->out_of_line) {
6899 MonoInst *iargs [2];
6901 if (cfg->method->klass->image == mono_defaults.corlib) {
6903 * Avoid relocations in AOT and save some space by using a
6904 * version of helper_ldstr specialized to mscorlib.
6906 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6907 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6909 /* Avoid creating the string object */
6910 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6911 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6912 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6916 if (cfg->compile_aot) {
6917 NEW_LDSTRCONST (cfg, ins, image, n);
6919 MONO_ADD_INS (bblock, ins);
6922 NEW_PCONST (cfg, ins, NULL);
6923 ins->type = STACK_OBJ;
6924 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6926 MONO_ADD_INS (bblock, ins);
6935 MonoInst *iargs [2];
6936 MonoMethodSignature *fsig;
6941 token = read32 (ip + 1);
6942 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6945 fsig = mono_method_get_signature (cmethod, image, token);
6947 mono_save_token_info (cfg, image, token, cmethod);
6949 if (!mono_class_init (cmethod->klass))
6952 if (cfg->generic_sharing_context)
6953 context_used = mono_method_check_context_used (cmethod);
6955 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6956 if (check_linkdemand (cfg, method, cmethod))
6958 CHECK_CFG_EXCEPTION;
6959 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6960 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6963 n = fsig->param_count;
6967 * Generate smaller code for the common newobj <exception> instruction in
6968 * argument checking code.
6970 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6971 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6972 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6973 MonoInst *iargs [3];
6977 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6980 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6984 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6989 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6992 g_assert_not_reached ();
7000 /* move the args to allow room for 'this' in the first position */
7006 /* check_call_signature () requires sp[0] to be set */
7007 this_ins.type = STACK_OBJ;
7009 if (check_call_signature (cfg, fsig, sp))
7014 if (mini_class_is_system_array (cmethod->klass)) {
7015 g_assert (!context_used);
7016 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7017 if (fsig->param_count == 2)
7018 /* Avoid varargs in the common case */
7019 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7021 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7022 } else if (cmethod->string_ctor) {
7023 g_assert (!context_used);
7024 /* we simply pass a null pointer */
7025 EMIT_NEW_PCONST (cfg, *sp, NULL);
7026 /* now call the string ctor */
7027 alloc = mono_emit_method_call (cfg, cmethod, fsig, sp, NULL);
7029 MonoInst* callvirt_this_arg = NULL;
7031 if (cmethod->klass->valuetype) {
7032 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7033 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7034 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7039 * The code generated by mini_emit_virtual_call () expects
7040 * iargs [0] to be a boxed instance, but luckily the vcall
7041 * will be transformed into a normal call there.
7043 } else if (context_used) {
7044 MonoInst *rgctx, *data;
7047 EMIT_GET_RGCTX (rgctx, context_used);
7048 if (cfg->opt & MONO_OPT_SHARED)
7049 rgctx_info = MONO_RGCTX_INFO_KLASS;
7051 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7052 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7054 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7057 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7059 CHECK_TYPELOAD (cmethod->klass);
7062 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7063 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7064 * As a workaround, we call class cctors before allocating objects.
7066 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7067 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7068 mono_emit_native_call (cfg, tramp,
7069 helper_sig_class_init_trampoline,
7071 if (cfg->verbose_level > 2)
7072 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7073 class_inits = g_slist_prepend (class_inits, vtable);
7076 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7081 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7083 /* Now call the actual ctor */
7084 /* Avoid virtual calls to ctors if possible */
7085 if (cmethod->klass->marshalbyref)
7086 callvirt_this_arg = sp [0];
7088 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7089 mono_method_check_inlining (cfg, cmethod) &&
7090 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7091 !g_list_find (dont_inline, cmethod)) {
7094 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7095 cfg->real_offset += 5;
7098 inline_costs += costs - 5;
7101 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7103 } else if (context_used &&
7104 (cmethod->klass->valuetype ||
7105 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7106 MonoInst *rgctx, *cmethod_addr;
7108 g_assert (!callvirt_this_arg);
7110 EMIT_GET_RGCTX (rgctx, context_used);
7111 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7112 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7114 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7117 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7121 if (alloc == NULL) {
7123 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7124 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7138 token = read32 (ip + 1);
7139 klass = mini_get_class (method, token, generic_context);
7140 CHECK_TYPELOAD (klass);
7141 if (sp [0]->type != STACK_OBJ)
7144 if (cfg->generic_sharing_context)
7145 context_used = mono_class_check_context_used (klass);
7148 MonoInst *rgctx, *args [2];
7150 g_assert (!method->klass->valuetype);
7156 EMIT_GET_RGCTX (rgctx, context_used);
7157 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7158 MONO_RGCTX_INFO_KLASS);
7160 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7164 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7165 MonoMethod *mono_castclass;
7166 MonoInst *iargs [1];
7169 mono_castclass = mono_marshal_get_castclass (klass);
7172 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7173 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7174 g_assert (costs > 0);
7177 cfg->real_offset += 5;
7182 inline_costs += costs;
7185 ins = handle_castclass (cfg, klass, *sp);
7195 token = read32 (ip + 1);
7196 klass = mini_get_class (method, token, generic_context);
7197 CHECK_TYPELOAD (klass);
7198 if (sp [0]->type != STACK_OBJ)
7201 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
7202 GENERIC_SHARING_FAILURE (CEE_ISINST);
7204 if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7206 MonoMethod *mono_isinst;
7207 MonoInst *iargs [1];
7210 mono_isinst = mono_marshal_get_isinst (klass);
7213 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7214 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7215 g_assert (costs > 0);
7218 cfg->real_offset += 5;
7223 inline_costs += costs;
7226 ins = handle_isinst (cfg, klass, *sp);
7232 case CEE_UNBOX_ANY: {
7233 MonoInst *rgctx = NULL;
7238 token = read32 (ip + 1);
7239 klass = mini_get_class (method, token, generic_context);
7240 CHECK_TYPELOAD (klass);
7242 if (cfg->generic_sharing_context)
7243 context_used = mono_class_check_context_used (klass);
7245 if (generic_class_is_reference_type (cfg, klass)) {
7248 MonoInst *iargs [2];
7250 g_assert (!method->klass->valuetype);
7255 EMIT_GET_RGCTX (rgctx, context_used);
7256 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7257 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7261 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7262 MonoMethod *mono_castclass;
7263 MonoInst *iargs [1];
7266 mono_castclass = mono_marshal_get_castclass (klass);
7269 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7270 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7272 g_assert (costs > 0);
7275 cfg->real_offset += 5;
7279 inline_costs += costs;
7281 ins = handle_castclass (cfg, klass, *sp);
7290 EMIT_GET_RGCTX (rgctx, context_used);
7292 if (mono_class_is_nullable (klass)) {
7293 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7300 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7306 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7314 int context_used = 0;
7320 token = read32 (ip + 1);
7321 klass = mini_get_class (method, token, generic_context);
7322 CHECK_TYPELOAD (klass);
7324 if (cfg->generic_sharing_context) {
7325 context_used = mono_class_check_context_used (klass);
7327 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
7328 GENERIC_SHARING_FAILURE (*ip);
7331 if (generic_class_is_reference_type (cfg, klass)) {
7337 if (klass == mono_defaults.void_class)
7339 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7341 /* frequent check in generic code: box (struct), brtrue */
7342 if (!mono_class_is_nullable (klass) &&
7343 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7344 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7346 MONO_INST_NEW (cfg, ins, OP_BR);
7347 if (*ip == CEE_BRTRUE_S) {
7350 target = ip + 1 + (signed char)(*ip);
7355 target = ip + 4 + (gint)(read32 (ip));
7358 GET_BBLOCK (cfg, tblock, target);
7359 link_bblock (cfg, bblock, tblock);
7360 CHECK_BBLOCK (target, ip, tblock);
7361 ins->inst_target_bb = tblock;
7362 GET_BBLOCK (cfg, tblock, ip);
7364 * This leads to some inconsistency, since the two bblocks are not
7365 * really connected, but it is needed for handling stack arguments
7366 * correct (See test_0_box_brtrue_opt_regress_81102).
7368 link_bblock (cfg, bblock, tblock);
7369 if (sp != stack_start) {
7370 handle_stack_args (cfg, stack_start, sp - stack_start);
7372 CHECK_UNVERIFIABLE (cfg);
7374 MONO_ADD_INS (bblock, ins);
7375 start_new_bblock = 1;
7382 if (mono_class_is_nullable (klass)) {
7383 GENERIC_SHARING_FAILURE (CEE_BOX);
7388 EMIT_GET_RGCTX (rgctx, context_used);
7389 if (cfg->opt & MONO_OPT_SHARED)
7390 rgctx_info = MONO_RGCTX_INFO_KLASS;
7392 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7393 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7394 *sp++ = handle_box_from_inst (cfg, val, klass, data);
7397 *sp++ = handle_box (cfg, val, klass);
7405 MonoInst *rgctx = NULL;
7410 token = read32 (ip + 1);
7411 klass = mini_get_class (method, token, generic_context);
7412 CHECK_TYPELOAD (klass);
7414 if (cfg->generic_sharing_context)
7415 context_used = mono_class_check_context_used (klass);
7418 EMIT_GET_RGCTX (rgctx, context_used);
7420 if (mono_class_is_nullable (klass)) {
7423 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7424 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7428 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7438 MonoClassField *field;
7442 if (*ip == CEE_STFLD) {
7449 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7451 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7454 token = read32 (ip + 1);
7455 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7456 field = mono_method_get_wrapper_data (method, token);
7457 klass = field->parent;
7460 field = mono_field_from_token (image, token, &klass, generic_context);
7464 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7465 FIELD_ACCESS_FAILURE;
7466 mono_class_init (klass);
7468 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7469 if (*ip == CEE_STFLD) {
7470 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7472 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7473 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7474 MonoInst *iargs [5];
7477 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7478 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7479 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7483 if (cfg->opt & MONO_OPT_INLINE) {
7484 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7485 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7486 g_assert (costs > 0);
7489 cfg->real_offset += 5;
7492 inline_costs += costs;
7495 mono_emit_method_call (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper), iargs, NULL);
7500 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7502 store->flags |= ins_flag;
7509 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7510 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7511 MonoInst *iargs [4];
7514 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7515 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7516 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7517 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7518 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7519 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7521 g_assert (costs > 0);
7524 cfg->real_offset += 5;
7528 inline_costs += costs;
7531 ins = mono_emit_method_call (cfg, wrapper, mono_method_signature (wrapper), iargs, NULL);
7535 if (sp [0]->type == STACK_VTYPE) {
7538 /* Have to compute the address of the variable */
7540 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7542 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7544 g_assert (var->klass == klass);
7546 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7550 if (*ip == CEE_LDFLDA) {
7551 dreg = alloc_preg (cfg);
7553 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7554 ins->klass = mono_class_from_mono_type (field->type);
7555 ins->type = STACK_MP;
7560 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7561 load->flags |= ins_flag;
7572 MonoClassField *field;
7573 gpointer addr = NULL;
7574 gboolean is_special_static;
7577 token = read32 (ip + 1);
7579 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7580 field = mono_method_get_wrapper_data (method, token);
7581 klass = field->parent;
7584 field = mono_field_from_token (image, token, &klass, generic_context);
7587 mono_class_init (klass);
7588 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7589 FIELD_ACCESS_FAILURE;
7592 * We can only support shared generic static
7593 * field access on architectures where the
7594 * trampoline code has been extended to handle
7595 * the generic class init.
7597 #ifndef MONO_ARCH_VTABLE_REG
7598 GENERIC_SHARING_FAILURE (*ip);
7601 if (cfg->generic_sharing_context)
7602 context_used = mono_class_check_context_used (klass);
7604 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7606 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7607 * to be called here.
7609 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7610 mono_class_vtable (cfg->domain, klass);
7611 CHECK_TYPELOAD (klass);
7613 mono_domain_lock (cfg->domain);
7614 if (cfg->domain->special_static_fields)
7615 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7616 mono_domain_unlock (cfg->domain);
7618 is_special_static = mono_class_field_is_special_static (field);
7620 /* Generate IR to compute the field address */
7622 if ((cfg->opt & MONO_OPT_SHARED) ||
7623 (cfg->compile_aot && is_special_static) ||
7624 (context_used && is_special_static)) {
7625 MonoInst *iargs [2];
7627 g_assert (field->parent);
7628 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7632 EMIT_GET_RGCTX (rgctx, context_used);
7633 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7635 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7637 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7638 } else if (context_used) {
7639 MonoInst *rgctx, *static_data;
7642 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7643 method->klass->name_space, method->klass->name, method->name,
7644 depth, field->offset);
7647 if (mono_class_needs_cctor_run (klass, method)) {
7649 MonoInst *vtable, *rgctx;
7651 EMIT_GET_RGCTX (rgctx, context_used);
7652 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7654 // FIXME: This doesn't work since it tries to pass the argument
7655 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7657 * The vtable pointer is always passed in a register regardless of
7658 * the calling convention, so assign it manually, and make a call
7659 * using a signature without parameters.
7661 call = (MonoCallInst*)mono_emit_native_call (cfg, mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT), helper_sig_generic_class_init_trampoline, &vtable);
7662 #ifdef MONO_ARCH_VTABLE_REG
7663 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7670 * The pointer we're computing here is
7672 * super_info.static_data + field->offset
7674 EMIT_GET_RGCTX (rgctx, context_used);
7675 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7677 if (field->offset == 0) {
7680 int addr_reg = mono_alloc_preg (cfg);
7681 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7683 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7684 MonoInst *iargs [2];
7686 g_assert (field->parent);
7687 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7688 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7689 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7691 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7693 CHECK_TYPELOAD (klass);
7695 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7696 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7697 mono_emit_native_call (cfg, tramp,
7698 helper_sig_class_init_trampoline,
7700 if (cfg->verbose_level > 2)
7701 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7702 class_inits = g_slist_prepend (class_inits, vtable);
7704 if (cfg->run_cctors) {
7706 /* This makes so that inline cannot trigger */
7707 /* .cctors: too many apps depend on them */
7708 /* running with a specific order... */
7709 if (! vtable->initialized)
7711 ex = mono_runtime_class_init_full (vtable, FALSE);
7713 set_exception_object (cfg, ex);
7714 goto exception_exit;
7718 addr = (char*)vtable->data + field->offset;
7720 if (cfg->compile_aot)
7721 EMIT_NEW_SFLDACONST (cfg, ins, field);
7723 EMIT_NEW_PCONST (cfg, ins, addr);
7726 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7727 * This could be later optimized to do just a couple of
7728 * memory dereferences with constant offsets.
7730 MonoInst *iargs [1];
7731 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7732 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7736 /* Generate IR to do the actual load/store operation */
7738 if (*ip == CEE_LDSFLDA) {
7739 ins->klass = mono_class_from_mono_type (field->type);
7741 } else if (*ip == CEE_STSFLD) {
7746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7747 store->flags |= ins_flag;
7750 gboolean is_const = FALSE;
7751 MonoVTable *vtable = NULL;
7753 if (!context_used) {
7754 vtable = mono_class_vtable (cfg->domain, klass);
7755 CHECK_TYPELOAD (klass);
7757 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7758 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7759 gpointer addr = (char*)vtable->data + field->offset;
7760 int ro_type = field->type->type;
7761 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7762 ro_type = field->type->data.klass->enum_basetype->type;
7764 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7767 case MONO_TYPE_BOOLEAN:
7769 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7773 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7776 case MONO_TYPE_CHAR:
7778 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7782 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7787 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7791 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7796 case MONO_TYPE_STRING:
7797 case MONO_TYPE_OBJECT:
7798 case MONO_TYPE_CLASS:
7799 case MONO_TYPE_SZARRAY:
7801 case MONO_TYPE_FNPTR:
7802 case MONO_TYPE_ARRAY:
7803 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7804 type_to_eval_stack_type ((cfg), field->type, *sp);
7809 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7814 case MONO_TYPE_VALUETYPE:
7824 CHECK_STACK_OVF (1);
7826 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7827 load->flags |= ins_flag;
7831 /* fixme: dont see the problem why this does not work */
7832 //cfg->disable_aot = TRUE;
7842 token = read32 (ip + 1);
7843 klass = mini_get_class (method, token, generic_context);
7844 CHECK_TYPELOAD (klass);
7845 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7846 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7857 const char *data_ptr;
7864 token = read32 (ip + 1);
7866 klass = mini_get_class (method, token, generic_context);
7867 CHECK_TYPELOAD (klass);
7869 if (cfg->generic_sharing_context)
7870 context_used = mono_class_check_context_used (klass);
7876 /* FIXME: Decompose later to help abcrem */
7879 EMIT_GET_RGCTX (rgctx, context_used);
7880 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7885 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7887 if (cfg->opt & MONO_OPT_SHARED) {
7888 /* Decompose now to avoid problems with references to the domainvar */
7889 MonoInst *iargs [3];
7891 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7892 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7895 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7897 /* Decompose later since it is needed by abcrem */
7898 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7899 ins->dreg = alloc_preg (cfg);
7900 ins->sreg1 = sp [0]->dreg;
7901 ins->inst_newa_class = klass;
7902 ins->type = STACK_OBJ;
7904 MONO_ADD_INS (cfg->cbb, ins);
7905 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7906 cfg->cbb->has_array_access = TRUE;
7908 /* Needed so mono_emit_load_get_addr () gets called */
7909 mono_get_got_var (cfg);
7919 * we inline/optimize the initialization sequence if possible.
7920 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7921 * for small sizes open code the memcpy
7922 * ensure the rva field is big enough
7924 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7925 MonoMethod *memcpy_method = get_memcpy_method ();
7926 MonoInst *iargs [3];
7927 int add_reg = alloc_preg (cfg);
7929 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7930 if (cfg->compile_aot) {
7931 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7933 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7935 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7936 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
7945 if (sp [0]->type != STACK_OBJ)
7948 dreg = alloc_preg (cfg);
7949 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7950 ins->dreg = alloc_preg (cfg);
7951 ins->sreg1 = sp [0]->dreg;
7952 ins->type = STACK_I4;
7953 MONO_ADD_INS (cfg->cbb, ins);
7954 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7955 cfg->cbb->has_array_access = TRUE;
7963 if (sp [0]->type != STACK_OBJ)
7966 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7968 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7969 CHECK_TYPELOAD (klass);
7970 /* we need to make sure that this array is exactly the type it needs
7971 * to be for correctness. the wrappers are lax with their usage
7972 * so we need to ignore them here
7974 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7975 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7978 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7982 case CEE_LDELEM_ANY:
7993 case CEE_LDELEM_REF: {
7999 if (*ip == CEE_LDELEM_ANY) {
8001 token = read32 (ip + 1);
8002 klass = mini_get_class (method, token, generic_context);
8003 CHECK_TYPELOAD (klass);
8004 mono_class_init (klass);
8007 klass = array_access_to_klass (*ip);
8009 if (sp [0]->type != STACK_OBJ)
8012 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8014 if (sp [1]->opcode == OP_ICONST) {
8015 int array_reg = sp [0]->dreg;
8016 int index_reg = sp [1]->dreg;
8017 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8019 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8020 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8022 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8023 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8026 if (*ip == CEE_LDELEM_ANY)
8039 case CEE_STELEM_REF:
8040 case CEE_STELEM_ANY: {
8046 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8048 if (*ip == CEE_STELEM_ANY) {
8050 token = read32 (ip + 1);
8051 klass = mini_get_class (method, token, generic_context);
8052 CHECK_TYPELOAD (klass);
8053 mono_class_init (klass);
8056 klass = array_access_to_klass (*ip);
8058 if (sp [0]->type != STACK_OBJ)
8061 /* storing a NULL doesn't need any of the complex checks in stelemref */
8062 if (generic_class_is_reference_type (cfg, klass) &&
8063 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8064 MonoMethod* helper = mono_marshal_get_stelemref ();
8065 MonoInst *iargs [3];
8067 if (sp [0]->type != STACK_OBJ)
8069 if (sp [2]->type != STACK_OBJ)
8076 mono_emit_method_call (cfg, helper, mono_method_signature (helper), iargs, NULL);
8078 if (sp [1]->opcode == OP_ICONST) {
8079 int array_reg = sp [0]->dreg;
8080 int index_reg = sp [1]->dreg;
8081 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8083 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8084 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8086 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8087 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8091 if (*ip == CEE_STELEM_ANY)
8098 case CEE_CKFINITE: {
8102 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8103 ins->sreg1 = sp [0]->dreg;
8104 ins->dreg = alloc_freg (cfg);
8105 ins->type = STACK_R8;
8106 MONO_ADD_INS (bblock, ins);
8112 case CEE_REFANYVAL: {
8113 MonoInst *src_var, *src;
8114 int context_used = 0;
8116 int klass_reg = alloc_preg (cfg);
8117 int dreg = alloc_preg (cfg);
8120 MONO_INST_NEW (cfg, ins, *ip);
8123 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8124 CHECK_TYPELOAD (klass);
8125 mono_class_init (klass);
8127 if (cfg->generic_sharing_context) {
8128 context_used = mono_class_check_context_used (klass);
8129 if (context_used && cfg->compile_aot)
8130 GENERIC_SHARING_FAILURE (*ip);
8135 GENERIC_SHARING_FAILURE (*ip);
8138 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8140 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8141 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8143 mini_emit_class_check (cfg, klass_reg, klass);
8144 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8146 ins->type = STACK_MP;
8151 case CEE_MKREFANY: {
8152 MonoInst *loc, *addr;
8153 int context_used = 0;
8156 MONO_INST_NEW (cfg, ins, *ip);
8159 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8160 CHECK_TYPELOAD (klass);
8161 mono_class_init (klass);
8163 if (cfg->generic_sharing_context) {
8164 context_used = mono_class_check_context_used (klass);
8165 if (context_used && cfg->compile_aot)
8166 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8169 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8170 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8173 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8174 } else if (cfg->compile_aot) {
8175 int const_reg = alloc_preg (cfg);
8176 int type_reg = alloc_preg (cfg);
8178 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8181 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8183 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8184 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8186 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8188 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8189 ins->type = STACK_VTYPE;
8190 ins->klass = mono_defaults.typed_reference_class;
8197 MonoClass *handle_class;
8198 int context_used = 0;
8200 CHECK_STACK_OVF (1);
8203 n = read32 (ip + 1);
8205 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8206 handle = mono_method_get_wrapper_data (method, n);
8207 handle_class = mono_method_get_wrapper_data (method, n + 1);
8208 if (handle_class == mono_defaults.typehandle_class)
8209 handle = &((MonoClass*)handle)->byval_arg;
8212 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8216 mono_class_init (handle_class);
8217 if (cfg->generic_sharing_context) {
8218 if (handle_class == mono_defaults.typehandle_class) {
8219 /* If we get a MONO_TYPE_CLASS
8220 then we need to provide the
8222 instantiation of it. */
8223 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8226 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8227 } else if (handle_class == mono_defaults.fieldhandle_class)
8228 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8229 else if (handle_class == mono_defaults.methodhandle_class)
8230 context_used = mono_method_check_context_used (handle);
8232 g_assert_not_reached ();
8235 if (cfg->opt & MONO_OPT_SHARED) {
8236 MonoInst *addr, *vtvar, *iargs [3];
8237 int method_context_used;
8239 if (cfg->generic_sharing_context)
8240 method_context_used = mono_method_check_context_used (method);
8242 method_context_used = 0;
8244 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8246 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8247 EMIT_NEW_ICONST (cfg, iargs [1], n);
8248 if (method_context_used) {
8251 EMIT_GET_RGCTX (rgctx, method_context_used);
8252 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8253 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8255 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8256 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8258 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8260 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8262 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8264 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8265 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8266 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8267 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8268 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8269 MonoClass *tclass = mono_class_from_mono_type (handle);
8271 mono_class_init (tclass);
8275 g_assert (!cfg->compile_aot);
8276 EMIT_GET_RGCTX (rgctx, context_used);
8277 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8278 } else if (cfg->compile_aot) {
8280 * FIXME: We would have to include the context into the
8281 * aot constant too (tests/generic-array-type.2.exe).
8283 if (generic_context)
8284 cfg->disable_aot = TRUE;
8285 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8287 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8289 ins->type = STACK_OBJ;
8290 ins->klass = cmethod->klass;
8293 MonoInst *addr, *vtvar;
8295 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8300 g_assert (!cfg->compile_aot);
8302 EMIT_GET_RGCTX (rgctx, context_used);
8303 if (handle_class == mono_defaults.typehandle_class) {
8304 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8305 mono_class_from_mono_type (handle),
8306 MONO_RGCTX_INFO_TYPE);
8307 } else if (handle_class == mono_defaults.methodhandle_class) {
8308 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8309 handle, MONO_RGCTX_INFO_METHOD);
8310 } else if (handle_class == mono_defaults.fieldhandle_class) {
8311 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8312 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8314 g_assert_not_reached ();
8316 } else if (cfg->compile_aot) {
8317 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8319 EMIT_NEW_PCONST (cfg, ins, handle);
8321 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8323 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8333 MONO_INST_NEW (cfg, ins, OP_THROW);
8335 ins->sreg1 = sp [0]->dreg;
8337 bblock->out_of_line = TRUE;
8338 MONO_ADD_INS (bblock, ins);
8339 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8340 MONO_ADD_INS (bblock, ins);
8343 link_bblock (cfg, bblock, end_bblock);
8344 start_new_bblock = 1;
8346 case CEE_ENDFINALLY:
8347 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8348 MONO_ADD_INS (bblock, ins);
8350 start_new_bblock = 1;
8353 * Control will leave the method so empty the stack, otherwise
8354 * the next basic block will start with a nonempty stack.
8356 while (sp != stack_start) {
8364 if (*ip == CEE_LEAVE) {
8366 target = ip + 5 + (gint32)read32(ip + 1);
8369 target = ip + 2 + (signed char)(ip [1]);
8372 /* empty the stack */
8373 while (sp != stack_start) {
8378 * If this leave statement is in a catch block, check for a
8379 * pending exception, and rethrow it if necessary.
8381 for (i = 0; i < header->num_clauses; ++i) {
8382 MonoExceptionClause *clause = &header->clauses [i];
8385 * Use <= in the final comparison to handle clauses with multiple
8386 * leave statements, like in bug #78024.
8387 * The ordering of the exception clauses guarantees that we find the
8390 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8392 MonoBasicBlock *dont_throw;
8397 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8400 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8402 NEW_BBLOCK (cfg, dont_throw);
8405 * Currently, we allways rethrow the abort exception, despite the
8406 * fact that this is not correct. See thread6.cs for an example.
8407 * But propagating the abort exception is more important than
8408 * getting the sematics right.
8410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8411 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8412 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8414 MONO_START_BB (cfg, dont_throw);
8419 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8421 for (tmp = handlers; tmp; tmp = tmp->next) {
8423 link_bblock (cfg, bblock, tblock);
8424 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8425 ins->inst_target_bb = tblock;
8426 MONO_ADD_INS (bblock, ins);
8428 g_list_free (handlers);
8431 MONO_INST_NEW (cfg, ins, OP_BR);
8432 MONO_ADD_INS (bblock, ins);
8433 GET_BBLOCK (cfg, tblock, target);
8434 link_bblock (cfg, bblock, tblock);
8435 CHECK_BBLOCK (target, ip, tblock);
8436 ins->inst_target_bb = tblock;
8437 start_new_bblock = 1;
8439 if (*ip == CEE_LEAVE)
8448 * Mono specific opcodes
8450 case MONO_CUSTOM_PREFIX: {
8452 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8456 case CEE_MONO_ICALL: {
8458 MonoJitICallInfo *info;
8460 token = read32 (ip + 2);
8461 func = mono_method_get_wrapper_data (method, token);
8462 info = mono_find_jit_icall_by_addr (func);
8465 CHECK_STACK (info->sig->param_count);
8466 sp -= info->sig->param_count;
8468 ins = mono_emit_jit_icall (cfg, info->func, sp);
8469 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8473 inline_costs += 10 * num_calls++;
8477 case CEE_MONO_LDPTR: {
8480 CHECK_STACK_OVF (1);
8482 token = read32 (ip + 2);
8484 ptr = mono_method_get_wrapper_data (method, token);
8485 if (cfg->compile_aot && (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE)) {
8486 MonoMethod *wrapped = mono_marshal_method_from_wrapper (cfg->method);
8488 if (wrapped && ptr != NULL && mono_lookup_internal_call (wrapped) == ptr) {
8489 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, wrapped);
8495 if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8496 MonoJitICallInfo *callinfo;
8497 const char *icall_name;
8499 icall_name = method->name + strlen ("__icall_wrapper_");
8500 g_assert (icall_name);
8501 callinfo = mono_find_jit_icall_by_name (icall_name);
8502 g_assert (callinfo);
8504 if (ptr == callinfo->func) {
8505 /* Will be transformed into an AOTCONST later */
8506 EMIT_NEW_PCONST (cfg, ins, ptr);
8513 /* FIXME: Generalize this */
8514 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8515 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8520 EMIT_NEW_PCONST (cfg, ins, ptr);
8523 inline_costs += 10 * num_calls++;
8524 /* Can't embed random pointers into AOT code */
8525 cfg->disable_aot = 1;
8528 case CEE_MONO_VTADDR: {
8529 MonoInst *src_var, *src;
8535 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8536 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8541 case CEE_MONO_NEWOBJ: {
8542 MonoInst *iargs [2];
8544 CHECK_STACK_OVF (1);
8546 token = read32 (ip + 2);
8547 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8548 mono_class_init (klass);
8549 NEW_DOMAINCONST (cfg, iargs [0]);
8550 MONO_ADD_INS (cfg->cbb, iargs [0]);
8551 NEW_CLASSCONST (cfg, iargs [1], klass);
8552 MONO_ADD_INS (cfg->cbb, iargs [1]);
8553 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8555 inline_costs += 10 * num_calls++;
8558 case CEE_MONO_OBJADDR:
8561 MONO_INST_NEW (cfg, ins, OP_MOVE);
8562 ins->dreg = alloc_preg (cfg);
8563 ins->sreg1 = sp [0]->dreg;
8564 ins->type = STACK_MP;
8565 MONO_ADD_INS (cfg->cbb, ins);
8569 case CEE_MONO_LDNATIVEOBJ:
8571 * Similar to LDOBJ, but instead load the unmanaged
8572 * representation of the vtype to the stack.
8577 token = read32 (ip + 2);
8578 klass = mono_method_get_wrapper_data (method, token);
8579 g_assert (klass->valuetype);
8580 mono_class_init (klass);
8583 MonoInst *src, *dest, *temp;
8586 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8587 temp->backend.is_pinvoke = 1;
8588 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8589 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8591 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8592 dest->type = STACK_VTYPE;
8593 dest->klass = klass;
8599 case CEE_MONO_RETOBJ: {
8601 * Same as RET, but return the native representation of a vtype
8604 g_assert (cfg->ret);
8605 g_assert (mono_method_signature (method)->pinvoke);
8610 token = read32 (ip + 2);
8611 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8613 if (!cfg->vret_addr) {
8614 g_assert (cfg->ret_var_is_local);
8616 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8618 EMIT_NEW_RETLOADA (cfg, ins);
8620 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8622 if (sp != stack_start)
8625 MONO_INST_NEW (cfg, ins, OP_BR);
8626 ins->inst_target_bb = end_bblock;
8627 MONO_ADD_INS (bblock, ins);
8628 link_bblock (cfg, bblock, end_bblock);
8629 start_new_bblock = 1;
8633 case CEE_MONO_CISINST:
8634 case CEE_MONO_CCASTCLASS: {
8639 token = read32 (ip + 2);
8640 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8641 if (ip [1] == CEE_MONO_CISINST)
8642 ins = handle_cisinst (cfg, klass, sp [0]);
8644 ins = handle_ccastclass (cfg, klass, sp [0]);
8650 case CEE_MONO_SAVE_LMF:
8651 case CEE_MONO_RESTORE_LMF:
8652 #ifdef MONO_ARCH_HAVE_LMF_OPS
8653 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8654 MONO_ADD_INS (bblock, ins);
8655 cfg->need_lmf_area = TRUE;
8659 case CEE_MONO_CLASSCONST:
8660 CHECK_STACK_OVF (1);
8662 token = read32 (ip + 2);
8663 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8666 inline_costs += 10 * num_calls++;
8668 case CEE_MONO_NOT_TAKEN:
8669 bblock->out_of_line = TRUE;
8673 CHECK_STACK_OVF (1);
8675 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8676 ins->dreg = alloc_preg (cfg);
8677 ins->inst_offset = (gint32)read32 (ip + 2);
8678 ins->type = STACK_PTR;
8679 MONO_ADD_INS (bblock, ins);
8684 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8694 /* somewhat similar to LDTOKEN */
8695 MonoInst *addr, *vtvar;
8696 CHECK_STACK_OVF (1);
8697 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8699 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8700 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8702 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8703 ins->type = STACK_VTYPE;
8704 ins->klass = mono_defaults.argumenthandle_class;
8717 * The following transforms:
8718 * CEE_CEQ into OP_CEQ
8719 * CEE_CGT into OP_CGT
8720 * CEE_CGT_UN into OP_CGT_UN
8721 * CEE_CLT into OP_CLT
8722 * CEE_CLT_UN into OP_CLT_UN
8724 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8726 MONO_INST_NEW (cfg, ins, cmp->opcode);
8728 cmp->sreg1 = sp [0]->dreg;
8729 cmp->sreg2 = sp [1]->dreg;
8730 type_from_op (cmp, sp [0], sp [1]);
8732 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8733 cmp->opcode = OP_LCOMPARE;
8734 else if (sp [0]->type == STACK_R8)
8735 cmp->opcode = OP_FCOMPARE;
8737 cmp->opcode = OP_ICOMPARE;
8738 MONO_ADD_INS (bblock, cmp);
8739 ins->type = STACK_I4;
8740 ins->dreg = alloc_dreg (cfg, ins->type);
8741 type_from_op (ins, sp [0], sp [1]);
8743 if (cmp->opcode == OP_FCOMPARE) {
8745 * The backends expect the fceq opcodes to do the
8748 cmp->opcode = OP_NOP;
8749 ins->sreg1 = cmp->sreg1;
8750 ins->sreg2 = cmp->sreg2;
8752 MONO_ADD_INS (bblock, ins);
8759 MonoMethod *cil_method, *ctor_method;
8760 gboolean is_shared = FALSE;
8762 CHECK_STACK_OVF (1);
8764 n = read32 (ip + 2);
8765 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8768 mono_class_init (cmethod->klass);
8770 if (cfg->generic_sharing_context)
8771 context_used = mono_method_check_context_used (cmethod);
8773 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8774 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8775 (cmethod->klass->generic_class ||
8776 cmethod->klass->generic_container)) {
8779 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8783 cil_method = cmethod;
8784 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8785 METHOD_ACCESS_FAILURE;
8787 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8788 if (check_linkdemand (cfg, method, cmethod))
8790 CHECK_CFG_EXCEPTION;
8791 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8792 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8796 * Optimize the common case of ldftn+delegate creation
8798 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8799 /* FIXME: SGEN support */
8800 /* FIXME: handle shared static generic methods */
8801 /* FIXME: handle this in shared code */
8802 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8803 MonoInst *target_ins;
8806 if (cfg->verbose_level > 3)
8807 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8808 target_ins = sp [-1];
8810 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8821 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8823 EMIT_GET_RGCTX (rgctx, context_used);
8824 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8825 } else if (is_shared) {
8826 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8828 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8830 if (method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED)
8831 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8833 ins = mono_emit_jit_icall (cfg, mono_ldftn_nosync, &argconst);
8837 inline_costs += 10 * num_calls++;
8840 case CEE_LDVIRTFTN: {
8845 n = read32 (ip + 2);
8846 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8849 mono_class_init (cmethod->klass);
8851 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8852 GENERIC_SHARING_FAILURE (CEE_LDVIRTFTN);
8854 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8855 if (check_linkdemand (cfg, method, cmethod))
8857 CHECK_CFG_EXCEPTION;
8858 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8859 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8864 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8865 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8868 inline_costs += 10 * num_calls++;
8872 CHECK_STACK_OVF (1);
8874 n = read16 (ip + 2);
8876 EMIT_NEW_ARGLOAD (cfg, ins, n);
8881 CHECK_STACK_OVF (1);
8883 n = read16 (ip + 2);
8885 NEW_ARGLOADA (cfg, ins, n);
8886 MONO_ADD_INS (cfg->cbb, ins);
8894 n = read16 (ip + 2);
8896 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8898 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8902 CHECK_STACK_OVF (1);
8904 n = read16 (ip + 2);
8906 EMIT_NEW_LOCLOAD (cfg, ins, n);
8911 CHECK_STACK_OVF (1);
8913 n = read16 (ip + 2);
8915 EMIT_NEW_LOCLOADA (cfg, ins, n);
8923 n = read16 (ip + 2);
8925 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8927 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8934 if (sp != stack_start)
8936 if (cfg->method != method)
8938 * Inlining this into a loop in a parent could lead to
8939 * stack overflows which is different behavior than the
8940 * non-inlined case, thus disable inlining in this case.
8942 goto inline_failure;
8944 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8945 ins->dreg = alloc_preg (cfg);
8946 ins->sreg1 = sp [0]->dreg;
8947 ins->type = STACK_PTR;
8948 MONO_ADD_INS (cfg->cbb, ins);
8950 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8951 if (header->init_locals)
8952 ins->flags |= MONO_INST_INIT;
8957 case CEE_ENDFILTER: {
8958 MonoExceptionClause *clause, *nearest;
8959 int cc, nearest_num;
8963 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8965 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8966 ins->sreg1 = (*sp)->dreg;
8967 MONO_ADD_INS (bblock, ins);
8968 start_new_bblock = 1;
8973 for (cc = 0; cc < header->num_clauses; ++cc) {
8974 clause = &header->clauses [cc];
8975 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8976 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8977 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8983 if ((ip - header->code) != nearest->handler_offset)
8988 case CEE_UNALIGNED_:
8989 ins_flag |= MONO_INST_UNALIGNED;
8990 /* FIXME: record alignment? we can assume 1 for now */
8995 ins_flag |= MONO_INST_VOLATILE;
8999 ins_flag |= MONO_INST_TAILCALL;
9000 cfg->flags |= MONO_CFG_HAS_TAIL;
9001 /* Can't inline tail calls at this time */
9002 inline_costs += 100000;
9009 token = read32 (ip + 2);
9010 klass = mini_get_class (method, token, generic_context);
9011 CHECK_TYPELOAD (klass);
9012 if (generic_class_is_reference_type (cfg, klass)) {
9013 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9015 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9016 mini_emit_initobj (cfg, *sp, NULL, klass);
9021 case CEE_CONSTRAINED_:
9023 token = read32 (ip + 2);
9024 constrained_call = mono_class_get_full (image, token, generic_context);
9025 CHECK_TYPELOAD (constrained_call);
9030 MonoInst *iargs [3];
9034 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9035 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9036 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9037 /* emit_memset only works when val == 0 */
9038 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9043 if (ip [1] == CEE_CPBLK) {
9044 MonoMethod *memcpy_method = get_memcpy_method ();
9045 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
9047 MonoMethod *memset_method = get_memset_method ();
9048 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
9058 ins_flag |= MONO_INST_NOTYPECHECK;
9060 ins_flag |= MONO_INST_NORANGECHECK;
9061 /* we ignore the no-nullcheck for now since we
9062 * really do it explicitly only when doing callvirt->call
9068 int handler_offset = -1;
9070 for (i = 0; i < header->num_clauses; ++i) {
9071 MonoExceptionClause *clause = &header->clauses [i];
9072 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY))
9073 handler_offset = clause->handler_offset;
9076 bblock->flags |= BB_EXCEPTION_UNSAFE;
9078 g_assert (handler_offset != -1);
9080 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9081 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9082 ins->sreg1 = load->dreg;
9083 MONO_ADD_INS (bblock, ins);
9085 link_bblock (cfg, bblock, end_bblock);
9086 start_new_bblock = 1;
9094 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9096 CHECK_STACK_OVF (1);
9098 token = read32 (ip + 2);
9099 /* FIXXME: handle generics. */
9100 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9101 MonoType *type = mono_type_create_from_typespec (image, token);
9102 token = mono_type_size (type, &ialign);
9104 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9105 CHECK_TYPELOAD (klass);
9106 mono_class_init (klass);
9107 token = mono_class_value_size (klass, &align);
9109 EMIT_NEW_ICONST (cfg, ins, token);
9114 case CEE_REFANYTYPE: {
9115 MonoInst *src_var, *src;
9121 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9123 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9124 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9125 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9135 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9140 g_error ("opcode 0x%02x not handled", *ip);
9143 if (start_new_bblock != 1)
9146 bblock->cil_length = ip - bblock->cil_code;
9147 bblock->next_bb = end_bblock;
9149 if (cfg->method == method && cfg->domainvar) {
9151 MonoInst *get_domain;
9153 cfg->cbb = init_localsbb;
9155 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9156 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9159 get_domain->dreg = alloc_preg (cfg);
9160 MONO_ADD_INS (cfg->cbb, get_domain);
9162 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9163 MONO_ADD_INS (cfg->cbb, store);
9166 if (cfg->method == method && cfg->got_var)
9167 mono_emit_load_got_addr (cfg);
9169 if (header->init_locals) {
9172 cfg->cbb = init_localsbb;
9173 cfg->ip = header->code;
9174 for (i = 0; i < header->num_locals; ++i) {
9175 MonoType *ptype = header->locals [i];
9176 int t = ptype->type;
9177 dreg = cfg->locals [i]->dreg;
9179 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9180 t = ptype->data.klass->enum_basetype->type;
9182 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9183 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9184 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9185 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9186 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9187 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9188 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9189 ins->type = STACK_R8;
9190 ins->inst_p0 = (void*)&r8_0;
9191 ins->dreg = alloc_dreg (cfg, STACK_R8);
9192 MONO_ADD_INS (init_localsbb, ins);
9193 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9194 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9195 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9196 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9198 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9205 /* resolve backward branches in the middle of an existing basic block */
9206 for (tmp = bb_recheck; tmp; tmp = tmp->next) {
9208 /*printf ("need recheck in %s at IL_%04x\n", method->name, bblock->cil_code - header->code);*/
9209 tblock = find_previous (cfg->cil_offset_to_bb, header->code_size, start_bblock, bblock->cil_code);
9210 if (tblock != start_bblock) {
9212 split_bblock (cfg, tblock, bblock);
9213 l = bblock->cil_code - header->code;
9214 bblock->cil_length = tblock->cil_length - l;
9215 tblock->cil_length = l;
9217 printf ("recheck failed.\n");
9221 if (cfg->method == method) {
9223 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9224 bb->region = mono_find_block_region (cfg, bb->real_offset);
9226 mono_create_spvar_for_region (cfg, bb->region);
9227 if (cfg->verbose_level > 2)
9228 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9232 g_slist_free (class_inits);
9233 dont_inline = g_list_remove (dont_inline, method);
9235 if (inline_costs < 0) {
9238 /* Method is too large */
9239 mname = mono_method_full_name (method, TRUE);
9240 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9241 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9246 if ((cfg->verbose_level > 1) && (cfg->method == method))
9247 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9249 return inline_costs;
9252 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9253 g_slist_free (class_inits);
9254 dont_inline = g_list_remove (dont_inline, method);
9258 g_slist_free (class_inits);
9259 dont_inline = g_list_remove (dont_inline, method);
9263 g_slist_free (class_inits);
9264 dont_inline = g_list_remove (dont_inline, method);
9265 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9269 g_slist_free (class_inits);
9270 dont_inline = g_list_remove (dont_inline, method);
9271 set_exception_type_from_invalid_il (cfg, method, ip);
9276 store_membase_reg_to_store_membase_imm (int opcode)
9279 case OP_STORE_MEMBASE_REG:
9280 return OP_STORE_MEMBASE_IMM;
9281 case OP_STOREI1_MEMBASE_REG:
9282 return OP_STOREI1_MEMBASE_IMM;
9283 case OP_STOREI2_MEMBASE_REG:
9284 return OP_STOREI2_MEMBASE_IMM;
9285 case OP_STOREI4_MEMBASE_REG:
9286 return OP_STOREI4_MEMBASE_IMM;
9287 case OP_STOREI8_MEMBASE_REG:
9288 return OP_STOREI8_MEMBASE_IMM;
9290 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant taking an
 * immediate second operand, where one exists (falls back to -1 otherwise,
 * per the surviving structure).
 * NOTE(review): this excerpt is missing many of the original case labels
 * and the surrounding switch scaffolding — only some return statements
 * survive.  Do not edit line-level logic without the full source.
 */
9297 mono_op_to_op_imm (int opcode)
/* Integer/long arithmetic and shift opcodes (labels dropped by sampling). */
9307 return OP_IDIV_UN_IMM;
9311 return OP_IREM_UN_IMM;
9325 return OP_ISHR_UN_IMM;
9342 return OP_LSHR_UN_IMM;
/* Compare opcodes. */
9345 return OP_COMPARE_IMM;
9347 return OP_ICOMPARE_IMM;
9349 return OP_LCOMPARE_IMM;
/* Stores: same mapping as store_membase_reg_to_store_membase_imm (). */
9351 case OP_STORE_MEMBASE_REG:
9352 return OP_STORE_MEMBASE_IMM;
9353 case OP_STOREI1_MEMBASE_REG:
9354 return OP_STOREI1_MEMBASE_IMM;
9355 case OP_STOREI2_MEMBASE_REG:
9356 return OP_STOREI2_MEMBASE_IMM;
9357 case OP_STOREI4_MEMBASE_REG:
9358 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes. */
9360 #if defined(__i386__) || defined (__x86_64__)
9362 return OP_X86_PUSH_IMM;
9363 case OP_X86_COMPARE_MEMBASE_REG:
9364 return OP_X86_COMPARE_MEMBASE_IMM;
9366 #if defined(__x86_64__)
9367 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9368 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9370 case OP_VOIDCALL_REG:
9379 return OP_LOCALLOC_IMM;
9386 ldind_to_load_membase (int opcode)
9390 return OP_LOADI1_MEMBASE;
9392 return OP_LOADU1_MEMBASE;
9394 return OP_LOADI2_MEMBASE;
9396 return OP_LOADU2_MEMBASE;
9398 return OP_LOADI4_MEMBASE;
9400 return OP_LOADU4_MEMBASE;
9402 return OP_LOAD_MEMBASE;
9404 return OP_LOAD_MEMBASE;
9406 return OP_LOADI8_MEMBASE;
9408 return OP_LOADR4_MEMBASE;
9410 return OP_LOADR8_MEMBASE;
9412 g_assert_not_reached ();
9419 stind_to_store_membase (int opcode)
9423 return OP_STOREI1_MEMBASE_REG;
9425 return OP_STOREI2_MEMBASE_REG;
9427 return OP_STOREI4_MEMBASE_REG;
9430 return OP_STORE_MEMBASE_REG;
9432 return OP_STOREI8_MEMBASE_REG;
9434 return OP_STORER4_MEMBASE_REG;
9436 return OP_STORER8_MEMBASE_REG;
9438 g_assert_not_reached ();
9445 mono_load_membase_to_load_mem (int opcode)
9447 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9448 #if defined(__i386__) || defined(__x86_64__)
9450 case OP_LOAD_MEMBASE:
9452 case OP_LOADU1_MEMBASE:
9453 return OP_LOADU1_MEM;
9454 case OP_LOADU2_MEMBASE:
9455 return OP_LOADU2_MEM;
9456 case OP_LOADI4_MEMBASE:
9457 return OP_LOADI4_MEM;
9458 case OP_LOADU4_MEMBASE:
9459 return OP_LOADU4_MEM;
9460 #if SIZEOF_VOID_P == 8
9461 case OP_LOADI8_MEMBASE:
9462 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   On x86/amd64, map an ALU opcode whose result is immediately stored via
 * store_opcode into a read-modify-write memory-destination form (e.g.
 * add reg -> [membase]), fusing the compute+store pair.  Only pointer/int
 * sized stores qualify (plus 8-byte stores on amd64).  Returns -1 (per the
 * surviving structure) when no fusion is possible.
 * NOTE(review): most case labels are missing from this excerpt; only the
 * guards and return statements survive.  Do not edit without full source.
 */
9471 op_to_op_dest_membase (int store_opcode, int opcode)
9473 #if defined(__i386__)
/* Only full-width stores can be fused on x86. */
9474 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9479 return OP_X86_ADD_MEMBASE_REG;
9481 return OP_X86_SUB_MEMBASE_REG;
9483 return OP_X86_AND_MEMBASE_REG;
9485 return OP_X86_OR_MEMBASE_REG;
9487 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-operand forms. */
9490 return OP_X86_ADD_MEMBASE_IMM;
9493 return OP_X86_SUB_MEMBASE_IMM;
9496 return OP_X86_AND_MEMBASE_IMM;
9499 return OP_X86_OR_MEMBASE_IMM;
9502 return OP_X86_XOR_MEMBASE_IMM;
9508 #if defined(__x86_64__)
/* amd64 also accepts 8-byte stores. */
9509 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit (X86_) forms. */
9514 return OP_X86_ADD_MEMBASE_REG;
9516 return OP_X86_SUB_MEMBASE_REG;
9518 return OP_X86_AND_MEMBASE_REG;
9520 return OP_X86_OR_MEMBASE_REG;
9522 return OP_X86_XOR_MEMBASE_REG;
9524 return OP_X86_ADD_MEMBASE_IMM;
9526 return OP_X86_SUB_MEMBASE_IMM;
9528 return OP_X86_AND_MEMBASE_IMM;
9530 return OP_X86_OR_MEMBASE_IMM;
9532 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (AMD64_) forms. */
9534 return OP_AMD64_ADD_MEMBASE_REG;
9536 return OP_AMD64_SUB_MEMBASE_REG;
9538 return OP_AMD64_AND_MEMBASE_REG;
9540 return OP_AMD64_OR_MEMBASE_REG;
9542 return OP_AMD64_XOR_MEMBASE_REG;
9545 return OP_AMD64_ADD_MEMBASE_IMM;
9548 return OP_AMD64_SUB_MEMBASE_IMM;
9551 return OP_AMD64_AND_MEMBASE_IMM;
9554 return OP_AMD64_OR_MEMBASE_IMM;
9557 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   On x86/amd64, fuse a compare-result opcode with the store consuming it
 * into a setcc-to-memory instruction.  Only a 1-byte store qualifies,
 * since setcc writes a single byte.  Falls back to -1 (no fusion) per the
 * surviving structure.
 * NOTE(review): the switch and its case labels (presumably the eq/ne
 * compare-result opcodes) are missing from this excerpt — confirm against
 * the full source before editing.
 */
9567 op_to_op_store_membase (int store_opcode, int opcode)
9569 #if defined(__i386__) || defined(__x86_64__)
9572 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9573 return OP_X86_SETEQ_MEMBASE;
9575 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9576 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   On x86/amd64, fold a load feeding an instruction's first source operand
 * into a memory-operand form of that instruction (e.g. cmp [membase], imm),
 * removing the separate load.  load_opcode identifies the width of the
 * load; -1 (per the surviving structure) means no folding is possible.
 * NOTE(review): several case labels and the arch #if boundaries are
 * missing from this excerpt; do not edit without the full source.
 */
9584 op_to_op_src1_membase (int load_opcode, int opcode)
/* i386 section. */
9587 /* FIXME: This has sign extension issues */
9589 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9590 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only full-width loads can be folded below. */
9593 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9598 return OP_X86_PUSH_MEMBASE;
9599 case OP_COMPARE_IMM:
9600 case OP_ICOMPARE_IMM:
9601 return OP_X86_COMPARE_MEMBASE_IMM;
9604 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 section. */
9609 /* FIXME: This has sign extension issues */
9611 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9612 return OP_X86_COMPARE_MEMBASE8_IMM;
9617 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9618 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the original (32-bit-imm caveat). */
9620 /* FIXME: This only works for 32 bit immediates
9621 case OP_COMPARE_IMM:
9622 case OP_LCOMPARE_IMM:
9623 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9624 return OP_AMD64_COMPARE_MEMBASE_IMM;
9626 case OP_ICOMPARE_IMM:
9627 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9628 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9632 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9633 return OP_AMD64_COMPARE_MEMBASE_REG;
9636 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9637 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase () for an instruction's second
 * source operand: fold the load feeding sreg2 into a reg-memory form of
 * the instruction (e.g. add reg, [membase]).  -1 (per the surviving
 * structure) means no folding is possible.
 * NOTE(review): case labels and #if boundaries are missing from this
 * excerpt; do not edit without the full source.
 */
9646 op_to_op_src2_membase (int load_opcode, int opcode)
/* i386 section: only full-width loads can be folded. */
9649 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9655 return OP_X86_COMPARE_REG_MEMBASE;
9657 return OP_X86_ADD_REG_MEMBASE;
9659 return OP_X86_SUB_REG_MEMBASE;
9661 return OP_X86_AND_REG_MEMBASE;
9663 return OP_X86_OR_REG_MEMBASE;
9665 return OP_X86_XOR_REG_MEMBASE;
/* amd64 section: width of the folded load selects X86_ vs AMD64_ form. */
9672 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9673 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9677 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9678 return OP_AMD64_COMPARE_REG_MEMBASE;
9681 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9682 return OP_X86_ADD_REG_MEMBASE;
9684 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9685 return OP_X86_SUB_REG_MEMBASE;
9687 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9688 return OP_X86_AND_REG_MEMBASE;
9690 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9691 return OP_X86_OR_REG_MEMBASE;
9693 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9694 return OP_X86_XOR_REG_MEMBASE;
9696 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9697 return OP_AMD64_ADD_REG_MEMBASE;
9699 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9700 return OP_AMD64_SUB_REG_MEMBASE;
9702 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9703 return OP_AMD64_AND_REG_MEMBASE;
9705 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9706 return OP_AMD64_OR_REG_MEMBASE;
9708 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9709 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but special-cases opcodes the architecture
 * emulates in software (long shifts on 32-bit targets, mul/div where
 * MONO_ARCH_EMULATE_* is defined) before delegating to the generic mapping.
 * NOTE(review): the case labels guarded by the #ifs below are missing from
 * this excerpt — confirm the exact emulated set against the full source.
 */
9717 mono_op_to_op_imm_noemul (int opcode)
9720 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPTS)
9725 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9733 return mono_op_to_op_imm (opcode);
/*
 * mono_handle_global_vregs:
 *
 *   Two-pass vreg classification.  Pass 1 walks every instruction of every
 * basic block and records, per vreg, which block it was seen in
 * (vreg_to_bb); a vreg seen in two different blocks is made "global" by
 * allocating a variable for it (and vreg_to_bb is set to -1).  Pass 2
 * demotes single-block variables back to local vregs and compacts the
 * varinfo/vars tables.
 * NOTE(review): this excerpt is missing lines (declarations, some braces
 * and else branches dropped by sampling); line-level edits need the full
 * source.
 */
9738 * mono_handle_global_vregs:
9740 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9744 mono_handle_global_vregs (MonoCompile *cfg)
/* One slot per vreg; holds (block_num + 1) of the block it was seen in,
 * 0 for "not seen yet", or -1 for "seen in multiple blocks". */
9750 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9752 /* Find local vregs used in more than one bb */
9753 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9754 MonoInst *ins = bb->code;
9755 int block_num = bb->block_num;
9757 if (cfg->verbose_level > 1)
9758 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9761 for (; ins; ins = ins->next) {
9762 const char *spec = INS_INFO (ins->opcode);
9763 int regtype, regindex;
9766 if (G_UNLIKELY (cfg->verbose_level > 1))
9767 mono_print_ins (ins);
/* Only lowered (non-CIL) opcodes are expected at this point. */
9769 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine dest, src1, src2 of the instruction in turn. */
9771 for (regindex = 0; regindex < 3; regindex ++) {
9774 if (regindex == 0) {
9775 regtype = spec [MONO_INST_DEST];
9779 } else if (regindex == 1) {
9780 regtype = spec [MONO_INST_SRC1];
9785 regtype = spec [MONO_INST_SRC2];
9791 #if SIZEOF_VOID_P == 4
9792 if (regtype == 'l') {
9794 * Since some instructions reference the original long vreg,
9795 * and some reference the two component vregs, it is quite hard
9796 * to determine when it needs to be global. So be conservative.
9798 if (!get_vreg_to_inst (cfg, vreg)) {
9799 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9801 if (cfg->verbose_level > 1)
9802 printf ("LONG VREG R%d made global.\n", vreg);
9806 * Make the component vregs volatile since the optimizations can
9807 * get confused otherwise.
9809 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9810 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9814 g_assert (vreg != -1);
9816 prev_bb = vreg_to_bb [vreg];
9818 /* 0 is a valid block num */
9819 vreg_to_bb [vreg] = block_num + 1;
9820 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; skip them. */
9821 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9824 if (!get_vreg_to_inst (cfg, vreg)) {
9825 if (G_UNLIKELY (cfg->verbose_level > 1))
9826 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9830 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9833 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9836 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9839 g_assert_not_reached ();
9843 /* Flag as having been used in more than one bb */
9844 vreg_to_bb [vreg] = -1;
9850 /* If a variable is used in only one bblock, convert it into a local vreg */
9851 for (i = 0; i < cfg->num_varinfo; i++) {
9852 MonoInst *var = cfg->varinfo [i];
9853 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9855 switch (var->type) {
9861 #if SIZEOF_VOID_P == 8
9864 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9865 /* Enabling this screws up the fp stack on x86 */
9868 /* Arguments are implicitly global */
9869 /* Putting R4 vars into registers doesn't work currently */
9870 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9872 * Make that the variable's liveness interval doesn't contain a call, since
9873 * that would cause the lvreg to be spilled, making the whole optimization
9876 /* This is too slow for JIT compilation */
9878 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9880 int def_index, call_index, ins_index;
9881 gboolean spilled = FALSE;
9886 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9887 const char *spec = INS_INFO (ins->opcode);
9889 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9890 def_index = ins_index;
/*
 * NOTE(review): the second disjunct below repeats SRC1/sreg1 — almost
 * certainly meant to be spec [MONO_INST_SRC2] / ins->sreg2, otherwise a
 * use through sreg2 never triggers the spilled check.  Confirm against
 * upstream before fixing.
 */
9892 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9893 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9894 if (call_index > def_index) {
9900 if (MONO_IS_CALL (ins))
9901 call_index = ins_index;
9911 if (G_UNLIKELY (cfg->verbose_level > 2))
9912 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
9913 var->flags |= MONO_INST_IS_DEAD;
9914 cfg->vreg_to_inst [var->dreg] = NULL;
9921 * Compress the varinfo and vars tables so the liveness computation is faster and
9922 * takes up less space.
9925 for (i = 0; i < cfg->num_varinfo; ++i) {
9926 MonoInst *var = cfg->varinfo [i];
9927 if (pos < i && cfg->locals_start == i)
9928 cfg->locals_start = pos;
9929 if (!(var->flags & MONO_INST_IS_DEAD)) {
9931 cfg->varinfo [pos] = cfg->varinfo [i];
9932 cfg->varinfo [pos]->inst_c0 = pos;
9933 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9934 cfg->vars [pos].idx = pos;
9935 #if SIZEOF_VOID_P == 4
9936 if (cfg->varinfo [pos]->type == STACK_I8) {
9937 /* Modify the two component vars too */
9940 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9941 var1->inst_c0 = pos;
9942 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9943 var1->inst_c0 = pos;
9950 cfg->num_varinfo = pos;
9951 if (cfg->locals_start > cfg->num_varinfo)
9952 cfg->locals_start = cfg->num_varinfo;
9956 * mono_spill_global_vars:
9958 * Generate spill code for variables which are not allocated to registers,
9959 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
9960 * code is generated which could be optimized by the local optimization passes.
9963 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
9968 guint32 *vreg_to_lvreg;
9970 guint32 i, lvregs_len;
9971 gboolean dest_has_lvreg = FALSE;
9972 guint32 stacktypes [128];
9974 *need_local_opts = FALSE;
9976 memset (spec2, 0, sizeof (spec2));
9978 /* FIXME: Move this function to mini.c */
9979 stacktypes ['i'] = STACK_PTR;
9980 stacktypes ['l'] = STACK_I8;
9981 stacktypes ['f'] = STACK_R8;
9983 #if SIZEOF_VOID_P == 4
9984 /* Create MonoInsts for longs */
9985 for (i = 0; i < cfg->num_varinfo; i++) {
9986 MonoInst *ins = cfg->varinfo [i];
9988 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
9989 switch (ins->type) {
9990 #ifdef MONO_ARCH_SOFT_FLOAT
9996 g_assert (ins->opcode == OP_REGOFFSET);
9998 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10000 tree->opcode = OP_REGOFFSET;
10001 tree->inst_basereg = ins->inst_basereg;
10002 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10004 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10006 tree->opcode = OP_REGOFFSET;
10007 tree->inst_basereg = ins->inst_basereg;
10008 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10018 /* FIXME: widening and truncation */
10021 * As an optimization, when a variable allocated to the stack is first loaded into
10022 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10023 * the variable again.
10025 orig_next_vreg = cfg->next_vreg;
10026 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10027 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10030 /* Add spill loads/stores */
10031 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10034 if (cfg->verbose_level > 1)
10035 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10037 /* Clear vreg_to_lvreg array */
10038 for (i = 0; i < lvregs_len; i++)
10039 vreg_to_lvreg [lvregs [i]] = 0;
10043 MONO_BB_FOR_EACH_INS (bb, ins) {
10044 const char *spec = INS_INFO (ins->opcode);
10045 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10046 gboolean store, no_lvreg;
10048 if (G_UNLIKELY (cfg->verbose_level > 1))
10049 mono_print_ins (ins);
10051 if (ins->opcode == OP_NOP)
10055 * We handle LDADDR here as well, since it can only be decomposed
10056 * when variable addresses are known.
10058 if (ins->opcode == OP_LDADDR) {
10059 MonoInst *var = ins->inst_p0;
10061 if (var->opcode == OP_VTARG_ADDR) {
10062 /* Happens on SPARC/S390 where vtypes are passed by reference */
10063 MonoInst *vtaddr = var->inst_left;
10064 if (vtaddr->opcode == OP_REGVAR) {
10065 ins->opcode = OP_MOVE;
10066 ins->sreg1 = vtaddr->dreg;
10068 else if (var->inst_left->opcode == OP_REGOFFSET) {
10069 ins->opcode = OP_LOAD_MEMBASE;
10070 ins->inst_basereg = vtaddr->inst_basereg;
10071 ins->inst_offset = vtaddr->inst_offset;
10075 g_assert (var->opcode == OP_REGOFFSET);
10077 ins->opcode = OP_ADD_IMM;
10078 ins->sreg1 = var->inst_basereg;
10079 ins->inst_imm = var->inst_offset;
10082 *need_local_opts = TRUE;
10083 spec = INS_INFO (ins->opcode);
10086 if (ins->opcode < MONO_CEE_LAST) {
10087 mono_print_ins (ins);
10088 g_assert_not_reached ();
10092 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10096 if (MONO_IS_STORE_MEMBASE (ins)) {
10097 tmp_reg = ins->dreg;
10098 ins->dreg = ins->sreg2;
10099 ins->sreg2 = tmp_reg;
10102 spec2 [MONO_INST_DEST] = ' ';
10103 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10104 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10106 } else if (MONO_IS_STORE_MEMINDEX (ins))
10107 g_assert_not_reached ();
10112 if (G_UNLIKELY (cfg->verbose_level > 1))
10113 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10118 regtype = spec [MONO_INST_DEST];
10119 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10122 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10123 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10124 MonoInst *store_ins;
10127 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10129 if (var->opcode == OP_REGVAR) {
10130 ins->dreg = var->dreg;
10131 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10133 * Instead of emitting a load+store, use a _membase opcode.
10135 g_assert (var->opcode == OP_REGOFFSET);
10136 if (ins->opcode == OP_MOVE) {
10139 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10140 ins->inst_basereg = var->inst_basereg;
10141 ins->inst_offset = var->inst_offset;
10144 spec = INS_INFO (ins->opcode);
10148 g_assert (var->opcode == OP_REGOFFSET);
10150 prev_dreg = ins->dreg;
10152 /* Invalidate any previous lvreg for this vreg */
10153 vreg_to_lvreg [ins->dreg] = 0;
10157 #ifdef MONO_ARCH_SOFT_FLOAT
10158 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10160 store_opcode = OP_STOREI8_MEMBASE_REG;
10164 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10166 if (regtype == 'l') {
10167 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10168 mono_bblock_insert_after_ins (bb, ins, store_ins);
10169 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10170 mono_bblock_insert_after_ins (bb, ins, store_ins);
10173 g_assert (store_opcode != OP_STOREV_MEMBASE);
10175 /* Try to fuse the store into the instruction itself */
10176 /* FIXME: Add more instructions */
10177 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10178 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10179 ins->inst_imm = ins->inst_c0;
10180 ins->inst_destbasereg = var->inst_basereg;
10181 ins->inst_offset = var->inst_offset;
10182 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10183 ins->opcode = store_opcode;
10184 ins->inst_destbasereg = var->inst_basereg;
10185 ins->inst_offset = var->inst_offset;
10189 tmp_reg = ins->dreg;
10190 ins->dreg = ins->sreg2;
10191 ins->sreg2 = tmp_reg;
10194 spec2 [MONO_INST_DEST] = ' ';
10195 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10196 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10198 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10199 // FIXME: The backends expect the base reg to be in inst_basereg
10200 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10202 ins->inst_basereg = var->inst_basereg;
10203 ins->inst_offset = var->inst_offset;
10204 spec = INS_INFO (ins->opcode);
10206 /* printf ("INS: "); mono_print_ins (ins); */
10207 /* Create a store instruction */
10208 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10210 /* Insert it after the instruction */
10211 mono_bblock_insert_after_ins (bb, ins, store_ins);
10214 * We can't assign ins->dreg to var->dreg here, since the
10215 * sregs could use it. So set a flag, and do it after
10218 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10219 dest_has_lvreg = TRUE;
10228 for (srcindex = 0; srcindex < 2; ++srcindex) {
10229 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10230 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10232 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10233 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10234 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10235 MonoInst *load_ins;
10236 guint32 load_opcode;
10238 if (var->opcode == OP_REGVAR) {
10240 ins->sreg1 = var->dreg;
10242 ins->sreg2 = var->dreg;
10246 g_assert (var->opcode == OP_REGOFFSET);
10248 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10250 g_assert (load_opcode != OP_LOADV_MEMBASE);
10252 if (vreg_to_lvreg [sreg]) {
10253 /* The variable is already loaded to an lvreg */
10254 if (G_UNLIKELY (cfg->verbose_level > 1))
10255 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10257 ins->sreg1 = vreg_to_lvreg [sreg];
10259 ins->sreg2 = vreg_to_lvreg [sreg];
10263 /* Try to fuse the load into the instruction */
10264 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10265 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10266 ins->inst_basereg = var->inst_basereg;
10267 ins->inst_offset = var->inst_offset;
10268 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10269 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10270 ins->sreg2 = var->inst_basereg;
10271 ins->inst_offset = var->inst_offset;
10273 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
10274 ins->opcode = OP_NOP;
10277 //printf ("%d ", srcindex); mono_print_ins (ins);
10279 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10281 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10282 if (var->dreg == prev_dreg) {
10284 * sreg refers to the value loaded by the load
10285 * emitted below, but we need to use ins->dreg
10286 * since it refers to the store emitted earlier.
10290 vreg_to_lvreg [var->dreg] = sreg;
10291 g_assert (lvregs_len < 1024);
10292 lvregs [lvregs_len ++] = var->dreg;
10301 if (regtype == 'l') {
10302 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10303 mono_bblock_insert_before_ins (bb, ins, load_ins);
10304 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10305 mono_bblock_insert_before_ins (bb, ins, load_ins);
10308 #if SIZEOF_VOID_P == 4
10309 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10311 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10312 mono_bblock_insert_before_ins (bb, ins, load_ins);
10318 if (dest_has_lvreg) {
10319 vreg_to_lvreg [prev_dreg] = ins->dreg;
10320 g_assert (lvregs_len < 1024);
10321 lvregs [lvregs_len ++] = prev_dreg;
10322 dest_has_lvreg = FALSE;
10326 tmp_reg = ins->dreg;
10327 ins->dreg = ins->sreg2;
10328 ins->sreg2 = tmp_reg;
10331 if (MONO_IS_CALL (ins)) {
10332 /* Clear vreg_to_lvreg array */
10333 for (i = 0; i < lvregs_len; i++)
10334 vreg_to_lvreg [lvregs [i]] = 0;
10338 if (cfg->verbose_level > 1)
10339 mono_print_ins_index (1, ins);
10346 * - use 'iadd' instead of 'int_add'
10347 * - handling ovf opcodes: decompose in method_to_ir.
10348 * - unify iregs/fregs
10349 * -> partly done, the missing parts are:
10350 * - a more complete unification would involve unifying the hregs as well, so
10351 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10352 * would no longer map to the machine hregs, so the code generators would need to
10353 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10354 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10355 * fp/non-fp branches speeds it up by about 15%.
10356 * - use sext/zext opcodes instead of shifts
10358 * - get rid of TEMPLOADs if possible and use vregs instead
10359 * - clean up usage of OP_P/OP_ opcodes
10360 * - cleanup usage of DUMMY_USE
10361 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10363 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10364 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10365 * - make sure handle_stack_args () is called before the branch is emitted
10366 * - when the new IR is done, get rid of all unused stuff
10367 * - COMPARE/BEQ as separate instructions or unify them ?
10368 * - keeping them separate allows specialized compare instructions like
10369 * compare_imm, compare_membase
10370 * - most back ends unify fp compare+branch, fp compare+ceq
10371 * - integrate handle_stack_args into inline_method
10372 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10373 * - Things to backport to the old JIT:
10374 * - op_atomic_exchange fix for amd64
10375 * - localloc fix for amd64
10376 * - x86 type_token change
10378 * - long eq/ne optimizations
10379 * - handle long shift opts on 32 bit platforms somehow: they require
10380 * 3 sregs (2 for arg1 and 1 for arg2)
10381 * - make byref a 'normal' type.
10382 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10383 * variable if needed.
10384 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10385 * like inline_method.
10386 * - remove inlining restrictions
10387 * - remove mono_save_args.
10388 * - add 'introduce a new optimization to simplify some range checks'
10389 * - fix LNEG and enable cfold of INEG
10390 * - generalize x86 optimizations like ldelema as a peephole optimization
10391 * - add store_mem_imm for amd64
10392 * - optimize the loading of the interruption flag in the managed->native wrappers
10393 * - avoid special handling of OP_NOP in passes
10394 * - move code inserting instructions into one function/macro.
10395 * - cleanup the code replacement in decompose_long_opts ()
10396 * - try a coalescing phase after liveness analysis
10397 * - add float -> vreg conversion + local optimizations on !x86
10398 * - figure out how to handle decomposed branches during optimizations, ie.
10399 * compare+branch, op_jump_table+op_br etc.
10400 * - promote RuntimeXHandles to vregs
10401 * - vtype cleanups:
10402 * - add a NEW_VARLOADA_VREG macro
10403 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10404 * accessing vtype fields.
10405 * - get rid of I8CONST on 64 bit platforms
10406 * - dealing with the increase in code size due to branches created during opcode
10408 * - use extended basic blocks
10409 * - all parts of the JIT
10410 * - handle_global_vregs () && local regalloc
10411 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10412 * - sources of increase in code size:
10415 * - isinst and castclass
10416 * - lvregs not allocated to global registers even if used multiple times
10417 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10419 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10420 * - add all micro optimizations from the old JIT
10421 * - put tree optimizations into the deadce pass
10422 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10423 * specific function.
10424 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10425 * fcompare + branchCC.
10426 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10427 * running generics.exe.
10428 * - create a helper function for allocating a stack slot, taking into account
10429 * MONO_CFG_HAS_SPILLUP.
10430 * - merge new GC changes in mini.c.
10432 * - merge the ia64 switch changes.
10433 * - merge the mips conditional changes.
10434 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10435 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10436 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10437 * - optimize mono_regstate2_alloc_int/float.
10438 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10439 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10440 * parts of the tree could be separated by other instructions, killing the tree
10441 * arguments, or stores killing loads etc. Also, should we fold loads into other
10442 * instructions if the result of the load is used multiple times ?
10443 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10444 * - LAST MERGE: 108395.
10445 * - when returning vtypes in registers, generate IR and append it to the end of the
10446 * last bb instead of doing it in the epilog.
10447 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10448 * ones in inssel.h.
10449 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10457 - When to decompose opcodes:
10458 - earlier: this makes some optimizations hard to implement, since the low level IR
10459 no longer contains the necessary information. But it is easier to do.
10460 - later: harder to implement, enables more optimizations.
10461 - Branches inside bblocks:
10462 - created when decomposing complex opcodes.
10463 - branches to another bblock: harmless, but not tracked by the branch
10464 optimizations, so need to branch to a label at the start of the bblock.
10465 - branches to inside the same bblock: very problematic, trips up the local
10466 reg allocator. Can be fixed by splitting the current bblock, but that is a
10467 complex operation, since some local vregs can become global vregs etc.
10468 - Local/global vregs:
10469 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10470 local register allocator.
10471 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10472 structure, created by mono_create_var (). Assigned to hregs or the stack by
10473 the global register allocator.
10474 - When to do optimizations like alu->alu_imm:
10475 - earlier -> saves work later on since the IR will be smaller/simpler
10476 - later -> can work on more instructions
10477 - Handling of valuetypes:
10478 - When a vtype is pushed on the stack, a new temporary is created, an
10479 instruction computing its address (LDADDR) is emitted and pushed on
10480 the stack. Need to optimize cases when the vtype is used immediately as in
10481 argument passing, stloc etc.
10482 - Instead of the to_end stuff in the old JIT, simply call the function handling
10483 the values on the stack before emitting the last instruction of the bb.