2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
/* Inliner tuning knobs: estimated cost of a branch, and the maximum IL
 * length of a callee considered for inlining. */
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee; only relevant while compiling an
 * inlined body (cfg->method != method) that is not a wrapper.
 * NOTE(review): the macro tail is not visible in this dump -- confirm the
 * failure action against the full source. */
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Stop IR generation as soon as a compilation exception has been recorded
 * on the cfg. */
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on the cfg (the caller `method' may not
 * access `cil_method') and bail out via the shared exception_exit label.
 * The two full-name strings are only used to build the message and are
 * freed immediately. */
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE, but for an inaccessible field:
 * record a FieldAccessException and jump to exception_exit. */
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
/* Give up on generic code sharing for the current method when `opcode'
 * is not supported under sharing; only acts when a sharing context is
 * active, otherwise it is a no-op.
 * NOTE(review): `verbose_level > -1' is always true, so the diagnostic
 * printf fires unconditionally -- looks like a debugging leftover; confirm
 * whether the threshold should be > 0. */
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > -1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
/* Sharing is additionally unsupported for methods of valuetypes: fail
 * only in that case. */
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whether 'ins' represents a load of the 'this' argument:
 * the enclosing method must have a 'this' parameter and 'ins' must be a
 * plain move whose source is the vreg of argument 0. */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 MonoMethodSignature *helper_sig_class_init_trampoline;
132 MonoMethodSignature *helper_sig_domain_get;
133 MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
/* Initialize a MonoMethodVar: 0xffff marks the first-use position as
 * "not yet seen". NOTE(review): rest of the macro is not visible in this
 * dump. */
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the file-local virtual-register allocators:
 * integer, float, pointer-sized, and destination register for a given
 * evaluation-stack type, respectively. */
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
/* Map a MonoType to the move opcode used when copying a value of that
 * type between vregs. Enum valuetypes are unwrapped to their underlying
 * basetype and generic instances to their container class before
 * re-dispatching; type variables are only legal under generic sharing. */
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
233 if (type->data.klass->enumtype) {
            /* treat an enum as its underlying integer type */
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
241 type = &type->data.generic_class->container_class->byval_arg;
            /* VAR/MVAR: only reachable when compiling shared code */
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump one basic block -- its incoming and outgoing edges
 * (block number and depth-first number) followed by every instruction in
 * its code list. */
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
/* Mark the method as unverified: break into the debugger when the
 * break_on_unverified debug option is set, otherwise jump to the shared
 * `unverified' label. */
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Fetch (or lazily create and register) the basic block starting at CIL
 * offset `ip', bounds-checking the offset against the method body. */
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
/* A backward branch to a block with no code yet: queue it on bb_recheck
 * so its stack state can be re-verified later. */
282 #define CHECK_BBLOCK(target,ip,tblock) do { \
283 if ((target) < (ip) && !(tblock)->code) { \
284 bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
285 if (cfg->verbose_level > 2) printf ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
/* Generic (non arch-specific) array bounds check: load the 32-bit length
 * field at `offset' from the array object and throw
 * IndexOutOfRangeException when index >= length (unsigned compare, so a
 * negative index also fails). Architectures may override this macro. */
289 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
290 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
291 int _length_reg = alloc_ireg (cfg); \
292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
293 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
294 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
/* Emit a bounds check for array_reg[index_reg]. Without the ABCREM
 * optimization the check is expanded inline immediately; with it, an
 * abstract OP_BOUNDS_CHECK instruction is emitted instead so the
 * array-bounds-check-removal pass can analyse and possibly eliminate it. */
298 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
299 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
300 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
303 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
304 ins->sreg1 = array_reg; \
305 ins->sreg2 = index_reg; \
306 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
307 MONO_ADD_INS ((cfg)->cbb, ins); \
308 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
309 (cfg)->cbb->has_array_access = TRUE; \
/* x86/amd64 only: emit an OP_X86_LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh pointer-sized vreg. */
313 #if defined(__i386__) || defined(__x86_64__)
314 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
315 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
316 (dest)->dreg = alloc_preg ((cfg)); \
317 (dest)->sreg1 = (sr1); \
318 (dest)->sreg2 = (sr2); \
319 (dest)->inst_imm = (imm); \
320 (dest)->backend.shift_amount = (shift); \
321 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen a 32-bit second operand (sign-extend to
 * pointer size) before a binop that mixes STACK_PTR and STACK_I4.
 * On 32-bit targets this is a no-op. */
325 #if SIZEOF_VOID_P == 8
326 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
327 /* FIXME: Need to add many more cases */ \
328 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
330 int dr = alloc_preg (cfg); \
331 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
332 (ins)->sreg2 = widen->dreg; \
336 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Emit a binary arithmetic op for the top two eval-stack entries (sp[0],
 * sp[1]): infer the result stack type via type_from_op (), widen mixed
 * ptr/i4 operands on 64-bit, allocate a result vreg, append the ins to
 * the current block and let mono_decompose_opcode () lower it if the
 * target needs emulation. */
339 #define ADD_BINOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 ins->sreg2 = sp [1]->dreg; \
344 type_from_op (ins, sp [0], sp [1]); \
346 /* Have to insert a widening op */ \
347 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
348 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 mono_decompose_opcode ((cfg), (ins)); \
/* Same as ADD_BINOP but for unary ops: one source operand (sp[0]). */
354 #define ADD_UNOP(op) do { \
355 MONO_INST_NEW (cfg, ins, (op)); \
357 ins->sreg1 = sp [0]->dreg; \
358 type_from_op (ins, sp [0], NULL); \
360 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
361 MONO_ADD_INS ((cfg)->cbb, (ins)); \
363 mono_decompose_opcode (cfg, ins); \
/* Emit a two-way conditional branch: an OP_COMPARE of the top two stack
 * entries followed by the typed branch `ins'. The true edge goes to the
 * branch target; the false edge goes to `next_block' when the caller
 * already knows it, otherwise to the block starting at the fallthrough
 * ip. Any values still on the eval stack are spilled to locals first
 * (handle_stack_args) since a block boundary follows. start_new_bblock
 * distinguishes the two fallthrough cases for the main IL loop. */
366 #define ADD_BINCOND(next_block) do { \
369 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
370 cmp->sreg1 = sp [0]->dreg; \
371 cmp->sreg2 = sp [1]->dreg; \
372 type_from_op (cmp, sp [0], sp [1]); \
374 type_from_op (ins, sp [0], sp [1]); \
375 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
376 GET_BBLOCK (cfg, tblock, target); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_true_bb = tblock; \
379 CHECK_BBLOCK (target, ip, tblock); \
380 if ((next_block)) { \
381 link_bblock (cfg, bblock, (next_block)); \
382 ins->inst_false_bb = (next_block); \
383 start_new_bblock = 1; \
385 GET_BBLOCK (cfg, tblock, ip); \
386 link_bblock (cfg, bblock, tblock); \
387 ins->inst_false_bb = tblock; \
388 start_new_bblock = 2; \
390 if (sp != stack_start) { \
391 handle_stack_args (cfg, stack_start, sp - stack_start); \
392 CHECK_UNVERIFIABLE (cfg); \
394 MONO_ADD_INS (bblock, cmp); \
395 MONO_ADD_INS (bblock, ins); \
399 * link_bblock: Links two basic blocks
401 * links two basic blocks in the control flow graph, the 'from'
402 * argument is the starting block and the 'to' argument is the block
403 * the control flow ends to after 'from'.
/* Add a CFG edge from -> to, keeping both adjacency lists (from->out_bb
 * and to->in_bb) in sync. Each direction first scans for an existing
 * edge to avoid duplicates; when absent, the array is grown by one via a
 * fresh mempool allocation and the old entries are copied over (mempool
 * memory is never freed individually, so the old array is simply
 * abandoned). The printfs are verbose-mode edge tracing. */
406 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
408 MonoBasicBlock **newa;
412 if (from->cil_code) {
414 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
416 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
419 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
421 printf ("edge from entry to exit\n");
    /* outgoing direction: skip if the edge already exists */
426 for (i = 0; i < from->out_count; ++i) {
427 if (to == from->out_bb [i]) {
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
434 for (i = 0; i < from->out_count; ++i) {
435 newa [i] = from->out_bb [i];
    /* incoming direction: same dedup-then-grow pattern */
443 for (i = 0; i < to->in_count; ++i) {
444 if (from == to->in_bb [i]) {
450 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
451 for (i = 0; i < to->in_count; ++i) {
452 newa [i] = to->in_bb [i];
/* Public wrapper for the static link_bblock (). */
461 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
463 link_bblock (cfg, from, to);
467 * mono_find_block_region:
469 * We mark each basic block with a region ID. We use that to avoid BB
470 * optimizations when blocks are in different regions.
473 * A region token that encodes where this region is, and information
474 * about the clause owner for this block.
476 * The region encodes the try/catch/filter clause that owns this block
477 * as well as the type. -1 is a special value that represents a block
478 * that is in none of try/catch/filter.
/* Compute the region token for the basic block at IL `offset': the
 * 1-based clause index shifted left by 8, OR-ed with a region kind
 * (filter/finally/fault/catch) and the clause flags. Handler and filter
 * ranges are searched before try ranges so that a block inside both gets
 * the handler's region. Falls through (outside the dump) when the offset
 * is in no clause. */
481 mono_find_block_region (MonoCompile *cfg, int offset)
483 MonoMethod *method = cfg->method;
484 MonoMethodHeader *header = mono_method_get_header (method);
485 MonoExceptionClause *clause;
488 /* first search for handlers and filters */
489 for (i = 0; i < header->num_clauses; ++i) {
490 clause = &header->clauses [i];
491 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
492 (offset < (clause->handler_offset)))
493 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
495 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
496 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
497 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
498 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
499 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
501 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
505 /* search the try blocks */
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
508 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
509 return ((i + 1) << 8) | clause->flags;
/* Collect the handler blocks (of clause kind `type', e.g. finally) that
 * must run when control transfers from `ip' to `target': every clause
 * whose try range contains ip but not target contributes its handler
 * block to the returned GList. */
516 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
518 MonoMethod *method = cfg->method;
519 MonoMethodHeader *header = mono_method_get_header (method);
520 MonoExceptionClause *clause;
521 MonoBasicBlock *handler;
525 for (i = 0; i < header->num_clauses; ++i) {
526 clause = &header->clauses [i];
527 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
528 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
529 if (clause->flags == type) {
530 handler = cfg->cil_offset_to_bb [clause->handler_offset];
532 res = g_list_append (res, handler);
/* Get or create the per-region stack-pointer variable used by exception
 * handling; cached in cfg->spvars keyed by region token. The variable is
 * flagged MONO_INST_INDIRECT so it stays on the stack instead of being
 * register-allocated. */
540 mono_create_spvar_for_region (MonoCompile *cfg, int region)
544 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
548 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for the
 * handler starting at `offset'; NULL when none exists yet. */
556 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
558 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for the handler at
 * `offset'; same caching and stack-allocation pattern as the spvar, but
 * typed as object. */
562 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
566 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
570 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
571 /* prevent it from being register allocated */
572 var->flags |= MONO_INST_INDIRECT;
574 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/* Linear scan over `bblocks' for the block whose cil_code is the closest
 * one strictly before `code'; `start' is the initial candidate (and the
 * fallback when nothing closer is found). */
579 static MonoBasicBlock*
580 find_previous (MonoBasicBlock **bblocks, guint32 n_bblocks, MonoBasicBlock *start, const guchar *code)
582 MonoBasicBlock *best = start;
585 for (i = 0; i < n_bblocks; ++i) {
587 MonoBasicBlock *bb = bblocks [i];
589 if (bb->cil_code && bb->cil_code < code && bb->cil_code > best->cil_code)
/* Split `first' into two blocks at second->cil_code: `second' inherits
 * all of first's outgoing edges (in_bb back-pointers are rewired), first
 * keeps only an edge to second, and the instruction list is cut at the
 * first instruction whose cil_code reaches second's start. Warns when no
 * split point is found in the code list. */
598 split_bblock (MonoCompile *cfg, MonoBasicBlock *first, MonoBasicBlock *second) {
607 * FIXME: take into account all the details:
608 * second may have been the target of more than one bblock
610 second->out_count = first->out_count;
611 second->out_bb = first->out_bb;
    /* repoint every successor's in_bb entry from first to second */
613 for (i = 0; i < first->out_count; ++i) {
614 bb = first->out_bb [i];
615 for (j = 0; j < bb->in_count; ++j) {
616 if (bb->in_bb [j] == first)
617 bb->in_bb [j] = second;
621 first->out_count = 0;
622 first->out_bb = NULL;
623 link_bblock (cfg, first, second);
625 second->last_ins = first->last_ins;
627 /*printf ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
628 for (inst = first->code; inst && inst->next; inst = inst->next) {
629 /*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
630 printf ("found %p: %s", inst->next->cil_code, code);
632 if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
            /* cut point: inst stays in first, inst->next starts second */
633 second->code = inst->next;
635 first->last_ins = inst;
636 second->next_bb = first->next_bb;
637 first->next_bb = second;
642 g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Set inst->type (and inst->klass) to the evaluation-stack category
 * (STACK_I4/I8/R8/PTR/OBJ/MP/VTYPE) corresponding to MonoType `type'.
 * Byref types become STACK_MP; enums and generic instances are unwrapped
 * and re-dispatched; VAR/MVAR are only legal under generic sharing and
 * are treated as references. */
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
657 inst->type = STACK_MP;
658 inst->klass = mono_defaults.object_class;
662 inst->klass = klass = mono_class_from_mono_type (type);
665 switch (type->type) {
667 inst->type = STACK_INV;
671 case MONO_TYPE_BOOLEAN:
677 inst->type = STACK_I4;
682 case MONO_TYPE_FNPTR:
683 inst->type = STACK_PTR;
685 case MONO_TYPE_CLASS:
686 case MONO_TYPE_STRING:
687 case MONO_TYPE_OBJECT:
688 case MONO_TYPE_SZARRAY:
689 case MONO_TYPE_ARRAY:
690 inst->type = STACK_OBJ;
694 inst->type = STACK_I8;
698 inst->type = STACK_R8;
700 case MONO_TYPE_VALUETYPE:
701 if (type->data.klass->enumtype) {
            /* an enum is its underlying integer type on the eval stack */
702 type = type->data.klass->enum_basetype;
706 inst->type = STACK_VTYPE;
709 case MONO_TYPE_TYPEDBYREF:
710 inst->klass = mono_defaults.typed_reference_class;
711 inst->type = STACK_VTYPE;
713 case MONO_TYPE_GENERICINST:
714 type = &type->data.generic_class->container_class->byval_arg;
717 case MONO_TYPE_MVAR :
718 /* FIXME: all the arguments must be references for now,
719 * later look inside cfg and see if the arg num is
722 g_assert (cfg->generic_sharing_context);
723 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
/* Validate operand stack types and specialize the opcode: looks up the
 * result type in the bin_*/shift/comp tables (STACK_INV marks invalid
 * IL) and then offsets ins->opcode by the matching *_op_map entry to get
 * the type-specific opcode (e.g. CEE_ADD -> OP_IADD/OP_LADD/...).
 * NOTE(review): many case labels fall between the visible lines of this
 * dump; the groupings below follow the visible table dispatch only. */
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
    /* binary arithmetic: result type from bin_num_table */
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
    /* integer-only binops (and/or/xor/div/rem) */
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
    /* shifts: result type follows the value, not the shift count */
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
    /* compare: pick long/float/int variant from the operand category */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
    /* immediate compare: both "operands" share src1's category */
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
    /* conditional branches and ceq/cgt/clt families */
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
    /* unary ops */
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
    /* conversions to 32-bit */
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
    /* conv.r.un: to double from unsigned int/long */
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
    /* checked (overflow-trapping) conversions to 32-bit */
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
    /* conversion to native unsigned int (pointer-sized) */
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_MOVE;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
    /* conversions to 64-bit */
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
    /* conversions to float */
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
    /* overflow-checked arithmetic: no float variant exists */
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
    /* memory loads: result category fixed by the load width */
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
    /* managed pointers get a generic klass for later verification */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the argument instructions in `args' are compatible with
 * method signature `sig' (byref-ness and reference/float categories per
 * parameter). Visible fragments suggest it returns a pass/fail result;
 * the return statements fall outside this dump -- confirm against the
 * full source. */
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
            /* managed pointer on the stack requires a byref parameter */
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
            /* a float on the stack must match an R4/R8, non-byref param */
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
/* Lazily create the local caching the MonoDomain* for this method (one
 * mono_domain_get () call instead of many); see comment above. */
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the local holding the GOT address, used only by AOT
 * compilation on architectures that define MONO_ARCH_NEED_GOT_VAR; when
 * not compiling AOT there is no GOT var (the early-return target is not
 * visible in this dump). */
1143 inline static MonoInst *
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/* Lazily create the runtime-generic-context variable used by shared
 * generic code; forced onto the stack (MONO_INST_INDIRECT) so trampolines
 * can address it. Only valid under a generic sharing context. */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1169 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type (): map an instruction's eval-stack
 * category back to a MonoType* suitable for creating a local of that
 * type. Managed pointers use the klass's this_arg (byref) type. */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to montype not handled\n", ins->type);
/* Map a MonoType directly to its eval-stack category (STACK_*), after
 * unwrapping enums via mono_type_get_underlying_type (). The return
 * statements for each case group fall outside this dump. */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 switch (mono_type_get_underlying_type (t)->type) {
1195 case MONO_TYPE_BOOLEAN:
1198 case MONO_TYPE_CHAR:
1205 case MONO_TYPE_FNPTR:
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_VALUETYPE:
1220 case MONO_TYPE_TYPEDBYREF:
1222 case MONO_TYPE_GENERICINST:
1223 if (mono_type_generic_inst_is_valuetype (t))
1229 g_assert_not_reached ();
/* Map an ldelem/stelem CIL opcode to the MonoClass of the element it
 * accesses (the case labels for most entries fall between the visible
 * lines of this dump). */
1236 array_access_to_klass (int opcode)
1240 return mono_defaults.byte_class;
1242 return mono_defaults.uint16_class;
1245 return mono_defaults.int_class;
1248 return mono_defaults.sbyte_class;
1251 return mono_defaults.int16_class;
1254 return mono_defaults.int32_class;
1256 return mono_defaults.uint32_class;
1259 return mono_defaults.int64_class;
1262 return mono_defaults.single_class;
1265 return mono_defaults.double_class;
1266 case CEE_LDELEM_REF:
1267 case CEE_STELEM_REF:
1268 return mono_defaults.object_class;
1270 g_assert_not_reached ();
1276 * We try to share variables when possible
/* Return the "interface" variable used to carry eval-stack slot `slot'
 * across basic-block boundaries. Variables of the simple stack
 * categories are cached per (type, slot) in cfg->intvars and reused;
 * other types (and slots beyond max_stack, which can occur when
 * inlining) always get a fresh local. */
1279 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1284 /* inlining can result in deeper stacks */
1285 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1286 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
    /* cache index: one entry per (stack category, slot) pair */
1288 pos = ins->type - 1 + slot * STACK_MAX;
1290 switch (ins->type) {
1297 if ((vnum = cfg->intvars [pos]))
1298 return cfg->varinfo [vnum];
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1300 cfg->intvars [pos] = res->inst_c0;
1303 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* When compiling AOT, remember which (image, token) pair produced the
 * runtime object `key' so the AOT compiler can later emit a relocation
 * for it; a no-op for JIT compilation. */
1309 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1311 if (cfg->compile_aot) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single joint point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the joint point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb with a live stack: pick the out_stack variables. */
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing the in_stack of a successor that already has one. */
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables for each slot. */
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpouse, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every non-handler successor. */
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
/* Successor already has a stack of a different depth: verification failure. */
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
1400 locals = bb->out_stack;
/* Spill each stack slot into its variable and replace it on the stack. */
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
1428 if (outb->in_stack != locals) {
/* Store the values into this successor's distinct in_stack variables too. */
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
/*
 * Loads the interface offset entry for 'klass' into 'intf_reg', given a
 * vtable pointer in 'vtable_reg'.  The interface_offsets array sits at
 * negative offsets from the vtable, hence the negated index below.
 */
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
/* AOT: the interface id is not known at compile time, load it via a patch. */
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a constant, so fold the whole offset into the load. */
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1465 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1466 * stored in "klass_reg" implements the interface "klass".
/*
 * Tests bit (interface_id) of the class's interface_bitmap:
 * byte index = interface_id >> 3, bit within byte = interface_id & 7.
 */
1469 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1471 int ibitmap_reg = alloc_preg (cfg);
1472 int ibitmap_byte_reg = alloc_preg (cfg);
1474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1476 if (cfg->compile_aot) {
/* AOT: interface_id is a runtime patch, compute byte index and bit mask in code. */
1477 int iid_reg = alloc_preg (cfg);
1478 int shifted_iid_reg = alloc_preg (cfg);
1479 int ibitmap_byte_address_reg = alloc_preg (cfg);
1480 int masked_iid_reg = alloc_preg (cfg);
1481 int iid_one_bit_reg = alloc_preg (cfg);
1482 int iid_bit_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1488 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1489 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1490 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the bit mask are compile-time constants. */
1492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1498 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1499 * stored in "vtable_reg" implements the interface "klass".
/*
 * Same bit test as mini_emit_load_intf_bit_reg_class (), but the bitmap is
 * read from the MonoVTable rather than from the MonoClass.
 */
1502 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1504 int ibitmap_reg = alloc_preg (cfg);
1505 int ibitmap_byte_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1509 if (cfg->compile_aot) {
/* AOT: interface_id comes from a patch; compute byte index and bit mask in code. */
1510 int iid_reg = alloc_preg (cfg);
1511 int shifted_iid_reg = alloc_preg (cfg);
1512 int ibitmap_byte_address_reg = alloc_preg (cfg);
1513 int masked_iid_reg = alloc_preg (cfg);
1514 int iid_one_bit_reg = alloc_preg (cfg);
1515 int iid_bit_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1521 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1522 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1523 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant byte offset and bit mask. */
1525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1531 * Emit code which checks whenever the interface id of @klass is smaller than
1532 * than the value given by max_iid_reg.
/*
 * On failure: branches to 'false_target' when one is given, otherwise throws
 * InvalidCastException.  The comparison is unsigned (LT_UN / PBLT_UN).
 */
1535 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1536 MonoBasicBlock *false_target)
1538 if (cfg->compile_aot) {
/* AOT: load the interface id through a patch before comparing. */
1539 int iid_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1551 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id (16-bit, zero-extended) and delegates
 * the range check to mini_emit_max_iid_check (). */
1553 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1554 MonoBasicBlock *false_target)
1556 int max_iid_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1559 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1562 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id (16-bit, zero-extended) and delegates
 * the range check to mini_emit_max_iid_check (). */
1564 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1565 MonoBasicBlock *false_target)
1567 int max_iid_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1570 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an "isinst"-style subclass test: checks whether the MonoClass in
 * 'klass_reg' derives from 'klass' by probing the supertypes array at
 * depth klass->idepth - 1.  Branches to 'true_target' on a match; the
 * idepth guard branches to 'false_target' when the hierarchy is too shallow.
 */
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int idepth_reg = alloc_preg (cfg);
1577 int stypes_reg = alloc_preg (cfg);
1578 int stype = alloc_preg (cfg);
/* Beyond the default supertable size the depth must be checked explicitly. */
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it against 'klass'. */
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1587 if (cfg->compile_aot) {
1588 int const_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1590 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * Emit an interface cast check against a vtable: range-checks the interface
 * id, then tests the vtable's interface bitmap.  On success either branches
 * to 'true_target' or (when no target is given) falls through; on failure
 * throws InvalidCastException.
 */
1598 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1600 int intf_reg = alloc_preg (cfg);
1602 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1603 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1608 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1612 * Variant of the above that takes a register to the class, not the vtable.
/* Same interface check as mini_emit_iface_cast (), but reads the
 * max_interface_id and interface_bitmap from a MonoClass in 'klass_reg'. */
1615 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1617 int intf_bit_reg = alloc_preg (cfg);
1619 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1620 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1625 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class identity check: throws InvalidCastException unless the
 * MonoClass pointer in 'klass_reg' equals 'klass'.  Under AOT the class
 * pointer is materialized through a CLASSCONST patch instead of an immediate.
 */
1629 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1631 if (cfg->compile_aot) {
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * Like mini_emit_class_check (), but instead of throwing, branches to
 * 'target' with the caller-supplied branch opcode (e.g. OP_PBEQ/OP_PBNE_UN)
 * based on the comparison of 'klass_reg' against 'klass'.
 */
1642 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1644 if (cfg->compile_aot) {
1645 int const_reg = alloc_preg (cfg);
1646 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * Emit a full "castclass" check for the object in 'obj_reg' whose class
 * pointer is in 'klass_reg', against the static target type 'klass'.
 * Array targets recurse on the element (cast_class); non-array targets use
 * the supertypes table via mini_emit_class_check ().  Failure paths throw
 * InvalidCastException; 'object_is_null' is the block to branch to on the
 * trivially-true paths.
 * NOTE(review): this extract elides lines (e.g. the array/non-array branch
 * header); comments below describe only the visible code.
 */
1655 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1658 int rank_reg = alloc_preg (cfg);
1659 int eclass_reg = alloc_preg (cfg);
/* The object's array rank must match the target's rank exactly. */
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1663 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1664 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-type dispatch: object/Enum-parent/Enum need special handling
 * because of the enum <-> underlying-type array cast rules. */
1666 if (klass->cast_class == mono_defaults.object_class) {
1667 int parent_reg = alloc_preg (cfg);
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1669 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1670 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1671 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1672 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1673 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1674 } else if (klass->cast_class == mono_defaults.enum_class) {
1675 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1676 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1677 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* General case: recurse on the element class. */
1679 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1682 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1683 /* Check that the object is a vector too */
1684 int bounds_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1687 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array target: exact supertype probe at idepth - 1. */
1690 int idepth_reg = alloc_preg (cfg);
1691 int stypes_reg = alloc_preg (cfg);
1692 int stype = alloc_preg (cfg);
1694 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1697 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1701 mini_emit_class_check (cfg, stype, klass);
/*
 * Emit inline code to set 'size' bytes at destreg+offset to 'val'.
 * Only val == 0 is supported (asserted).  Small aligned sizes are handled
 * with a single immediate store; larger sizes fall through to a sequence of
 * progressively narrower register stores (8/4/2/1 bytes).
 * NOTE(review): the loop/size-decrement lines are elided in this extract.
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1710 g_assert (val == 0);
/* Fast path: a single immediate store covers the whole region. */
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_VOID_P == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: put the value in a register and store it repeatedly. */
1731 val_reg = alloc_preg (cfg);
1733 if (sizeof (gpointer) == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1739 /* This could be optimized further if neccesary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (sizeof (gpointer) == 8) {
/* 64-bit: align to 8 with a 4-byte store, then use 8-byte stores. */
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, then 2-, then 1-byte stores. */
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit inline code to copy 'size' bytes from srcreg+soffset to
 * destreg+doffset.  Unaligned copies fall back to byte loads/stores;
 * aligned copies use the widest available width (8 on 64-bit when
 * unaligned access is allowed, then 4, 2, 1 for the tail).
 * NOTE(review): the loop/size-decrement lines are elided in this extract.
 */
1781 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned source/destination: copy one byte at a time. */
1786 /* This could be optimized further if neccesary */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1797 #if !NO_UNALIGNED_ACCESS
1798 if (sizeof (gpointer) == 8) {
/* 64-bit: copy 8 bytes per iteration while possible. */
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-byte chunks... */
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...then 2-byte... */
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...then single bytes. */
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Emit the covariance check for array stores (stelem.ref): verifies that the
 * runtime type of 'obj' is exactly 'array_class', throwing
 * ArrayTypeMismatchException otherwise.  With MONO_OPT_SHARED the check is
 * done on the MonoClass (vtables are per-domain and cannot be compared);
 * otherwise the vtable pointer itself is compared.
 */
1837 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1839 int vtable_reg = alloc_preg (cfg);
1841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1843 if (cfg->opt & MONO_OPT_SHARED) {
1844 int class_reg = alloc_preg (cfg);
1845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1846 if (cfg->compile_aot) {
/* AOT: materialize the class pointer through a patch. */
1847 int klass_reg = alloc_preg (cfg);
1848 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1849 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1854 if (cfg->compile_aot) {
1855 int vt_reg = alloc_preg (cfg);
1856 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1857 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * Map a return type to the matching call opcode family:
 * VOIDCALL (void), CALL (int/ref), LCALL (i8), FCALL (float),
 * VCALL (valuetype/typedbyref), each with _REG (calli) and VIRT variants.
 * Enums are lowered to their underlying type; generic instances to their
 * container class; byref returns (handled before the switch in the elided
 * lines) use plain CALL.
 */
1867 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1870 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 type = mini_get_basic_type_from_generic (gsctx, type);
1874 switch (type->type) {
1875 case MONO_TYPE_VOID:
1876 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1879 case MONO_TYPE_BOOLEAN:
1882 case MONO_TYPE_CHAR:
1885 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1889 case MONO_TYPE_FNPTR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1902 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1903 case MONO_TYPE_VALUETYPE:
/* An enum return is really its underlying integer type: re-dispatch. */
1904 if (type->data.klass->enumtype) {
1905 type = type->data.klass->enum_basetype;
1908 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1909 case MONO_TYPE_TYPEDBYREF:
1910 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1911 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open class. */
1912 type = &type->data.generic_class->container_class->byval_arg;
1915 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1921 * target_type_is_incompatible:
1922 * @cfg: MonoCompile context
1924 * Check that the item @arg on the evaluation stack can be stored
1925 * in the target type (can be a local, or field, etc).
1926 * The cfg arg can be used to check if we need verification or just
1929 * Returns: non-0 value if arg can't be stored on a target.
1932 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1934 MonoType *simple_type;
/* Byref targets accept managed pointers of the matching class, or raw
 * pointers (the STACK_PTR case; its return is elided in this extract). */
1937 if (target->byref) {
1938 /* FIXME: check that the pointed to types match */
1939 if (arg->type == STACK_MP)
1940 return arg->klass != mono_class_from_mono_type (target);
1941 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the basic type. */
1946 simple_type = mono_type_get_underlying_type (target);
1947 switch (simple_type->type) {
1948 case MONO_TYPE_VOID:
1952 case MONO_TYPE_BOOLEAN:
1955 case MONO_TYPE_CHAR:
1958 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1962 /* STACK_MP is needed when setting pinned locals */
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1968 case MONO_TYPE_FNPTR:
1969 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 if (arg->type != STACK_OBJ)
1979 /* FIXME: check type compatibility */
1983 if (arg->type != STACK_I8)
1988 if (arg->type != STACK_R8)
1991 case MONO_TYPE_VALUETYPE:
/* Value types must match both stack kind and exact class. */
1992 if (arg->type != STACK_VTYPE)
1994 klass = mono_class_from_mono_type (simple_type);
1995 if (klass != arg->klass)
1998 case MONO_TYPE_TYPEDBYREF:
1999 if (arg->type != STACK_VTYPE)
2001 klass = mono_class_from_mono_type (simple_type);
2002 if (klass != arg->klass)
2005 case MONO_TYPE_GENERICINST:
/* Generic valuetype insts behave like valuetypes; others like refs. */
2006 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2007 if (arg->type != STACK_VTYPE)
2009 klass = mono_class_from_mono_type (simple_type);
2010 if (klass != arg->klass)
2014 if (arg->type != STACK_OBJ)
2016 /* FIXME: check type compatibility */
2020 case MONO_TYPE_MVAR:
2021 /* FIXME: all the arguments must be references for now,
2022 * later look inside cfg and see if the arg num is
2023 * really a reference
2025 g_assert (cfg->generic_sharing_context);
2026 if (arg->type != STACK_OBJ)
2030 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2036 * Prepare arguments for passing to a function call.
2037 * Return a non-zero value if the arguments can't be passed to the given
2039 * The type checks are not yet complete and some conversions may need
2040 * casts on 32 or 64 bit architectures.
2042 * FIXME: implement this using target_type_is_incompatible ()
2045 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2047 MonoType *simple_type;
/* 'this' (when present; the hasthis guard is elided here) must be an
 * object reference, managed pointer or raw pointer. */
2051 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each formal parameter against the stack type of its argument. */
2055 for (i = 0; i < sig->param_count; ++i) {
2056 if (sig->params [i]->byref) {
2057 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2061 simple_type = sig->params [i];
2062 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2064 switch (simple_type->type) {
2065 case MONO_TYPE_VOID:
2070 case MONO_TYPE_BOOLEAN:
2073 case MONO_TYPE_CHAR:
2076 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2082 case MONO_TYPE_FNPTR:
2083 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2086 case MONO_TYPE_CLASS:
2087 case MONO_TYPE_STRING:
2088 case MONO_TYPE_OBJECT:
2089 case MONO_TYPE_SZARRAY:
2090 case MONO_TYPE_ARRAY:
2091 if (args [i]->type != STACK_OBJ)
2096 if (args [i]->type != STACK_I8)
2101 if (args [i]->type != STACK_R8)
2104 case MONO_TYPE_VALUETYPE:
/* Enums are re-checked as their underlying type. */
2105 if (simple_type->data.klass->enumtype) {
2106 simple_type = simple_type->data.klass->enum_basetype;
2109 if (args [i]->type != STACK_VTYPE)
2112 case MONO_TYPE_TYPEDBYREF:
2113 if (args [i]->type != STACK_VTYPE)
2116 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open class. */
2117 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2121 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart (used when a
 * virtual call can be statically devirtualized).  Aborts on unknown input. */
2129 callvirt_to_call (int opcode)
2134 case OP_VOIDCALLVIRT:
2143 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its *CALL_MEMBASE counterpart, used when the
 * target address is loaded from [basereg + offset] (vtable/IMT dispatch). */
2150 callvirt_to_call_membase (int opcode)
2154 return OP_CALL_MEMBASE;
2155 case OP_VOIDCALLVIRT:
2156 return OP_VOIDCALL_MEMBASE;
2158 return OP_FCALL_MEMBASE;
2160 return OP_LCALL_MEMBASE;
2162 return OP_VCALL_MEMBASE;
2164 g_assert_not_reached ();
2170 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT identification argument (the target MonoMethod, or the
 * supplied imt_arg) for an interface call.  On architectures with a
 * dedicated IMT register it is attached as a call out-arg; otherwise the
 * architecture backend emits it itself.
 */
2172 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2174 #ifdef MONO_ARCH_IMT_REG
2175 int method_reg = alloc_preg (cfg);
2177 if (cfg->compile_aot) {
/* AOT: the method pointer comes from a METHODCONST patch. */
2178 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2179 } else if (imt_arg) {
2180 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2183 MONO_INST_NEW (cfg, ins, OP_PCONST);
2184 ins->inst_p0 = call->method;
2185 ins->dreg = method_reg;
2186 MONO_ADD_INS (cfg->cbb, ins);
2189 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: delegate to the architecture backend. */
2191 mono_arch_emit_imt_argument (cfg, call);
2196 inline static MonoInst*
2197 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * Build a MonoCallInst for 'sig': picks the call opcode, allocates the
 * return vreg (or a vtype return temp plus an OP_OUTARG_VTRETADDR), applies
 * the soft-float r8->r4 argument fixups, and lets the backend emit the
 * out-args.  The caller fills in the target and adds the instruction to a bb.
 */
2199 inline static MonoCallInst *
2200 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2201 MonoInst **args, int calli, int virtual)
2204 #ifdef MONO_ARCH_SOFT_FLOAT
2208 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2211 call->signature = sig;
2213 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: allocate a temp to receive the value by address. */
2215 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2216 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2219 temp->backend.is_pinvoke = sig->pinvoke;
2222 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2223 * address of return value to increase optimization opportunities.
2224 * Before vtype decomposition, the dreg of the call ins itself represents the
2225 * fact the call modifies the return value. After decomposition, the call will
2226 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2227 * will be transformed into an LDADDR.
2229 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2230 loada->dreg = alloc_preg (cfg);
2231 loada->inst_p0 = temp;
2232 /* We reference the call too since call->dreg could change during optimization */
2233 loada->inst_p1 = call;
2234 MONO_ADD_INS (cfg->cbb, loada);
2236 call->inst.dreg = temp->dreg;
2238 call->vret_var = loada;
2239 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2240 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2242 #ifdef MONO_ARCH_SOFT_FLOAT
2244 * If the call has a float argument, we would need to do an r8->r4 conversion using
2245 * an icall, but that cannot be done during the call sequence since it would clobber
2246 * the call registers + the stack. So we do it before emitting the call.
2248 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2250 MonoInst *in = call->args [i];
2252 if (i >= sig->hasthis)
2253 t = sig->params [i - sig->hasthis];
2255 t = &mono_defaults.int_class->byval_arg;
2256 t = mono_type_get_underlying_type (t);
2258 if (!t->byref && t->type == MONO_TYPE_R4) {
2259 MonoInst *iargs [1];
2263 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2265 /* The result will be in an int vreg */
2266 call->args [i] = conv;
/* Let the architecture backend lower the arguments to out-args. */
2271 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing parameter area needed by any call. */
2273 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2274 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * Emit an indirect call through the address in 'addr' with signature 'sig'.
 * Builds the call with mono_emit_call_args (calli=TRUE), wires the address
 * vreg as sreg1 and appends the instruction to the current bb.
 */
2279 inline static MonoInst*
2280 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2282 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2284 call->inst.sreg1 = addr->dreg;
2286 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2288 return (MonoInst*)call;
/*
 * Indirect call that additionally passes a runtime-generic-context argument
 * in the architecture's RGCTX register.  Only available when
 * MONO_ARCH_RGCTX_REG is defined; otherwise unreachable.
 */
2291 inline static MonoInst*
2292 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2294 #ifdef MONO_ARCH_RGCTX_REG
2296 int rgctx_reg = mono_alloc_preg (cfg);
2298 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2299 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2300 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2301 return (MonoInst*)call;
2303 g_assert_not_reached ();
/*
 * Emit a (possibly virtual) managed call to 'method'.  'this' non-NULL makes
 * the call virtual.  Handles: string ctors (signature fixup), remoting
 * proxies (remoting-invoke-with-check wrappers), devirtualization of
 * non-virtual/final/sealed targets, delegate Invoke via invoke_impl, and
 * interface dispatch through the IMT or the interface-offsets table.
 * 'imt_arg' optionally overrides the IMT identification argument.
 */
2309 mono_emit_imt_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2310 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2312 gboolean virtual = this != NULL;
2313 gboolean enable_for_aot = TRUE;
2316 if (method->string_ctor) {
2317 /* Create the real signature */
2318 /* FIXME: Cache these */
2319 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup (sig);
2320 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2325 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Transparent-proxy / MarshalByRef targets go through the remoting wrapper. */
2327 if (this && sig->hasthis &&
2328 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2329 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2330 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2332 call->method = method;
2334 call->inst.flags |= MONO_INST_HAS_METHOD;
2335 call->inst.inst_left = this;
2338 int vtable_reg, slot_reg, this_reg;
2340 this_reg = this->dreg;
/* Devirtualize: non-virtual target, or final method outside remoting. */
2342 if ((!cfg->compile_aot || enable_for_aot) &&
2343 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2344 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2345 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2347 * the method is not virtual, we just need to ensure this is not null
2348 * and then we can call the method directly.
2350 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2351 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2354 if (!method->string_ctor) {
2355 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2356 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2357 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2360 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2362 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2364 return (MonoInst*)call;
2367 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2368 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2369 /* Make a call to delegate->invoke_impl */
2370 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2371 call->inst.inst_basereg = this_reg;
2372 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2373 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2375 return (MonoInst*)call;
/* Virtual but statically resolvable: final method or sealed class. */
2379 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2380 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2381 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2383 * the method is virtual, but we can statically dispatch since either
2384 * it's class or the method itself are sealed.
2385 * But first we need to ensure it's not a null reference.
2387 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2388 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2389 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2391 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2392 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2394 return (MonoInst*)call;
/* True virtual dispatch: load the slot from the vtable (or IMT). */
2397 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2399 /* Initialize method->slot */
2400 mono_class_setup_vtable (method->klass);
2402 vtable_reg = alloc_preg (cfg);
2403 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2404 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2406 #ifdef MONO_ARCH_HAVE_IMT
/* IMT: slots live at negative offsets from the vtable. */
2408 guint32 imt_slot = mono_method_get_imt_slot (method);
2409 emit_imt_argument (cfg, call, imt_arg);
2410 slot_reg = vtable_reg;
2411 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: go through the interface-offsets table. */
2414 if (slot_reg == -1) {
2415 slot_reg = alloc_preg (cfg);
2416 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2417 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
/* Class method: index straight into the vtable's method array. */
2420 slot_reg = vtable_reg;
2421 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2424 call->inst.sreg1 = slot_reg;
2425 call->virtual = TRUE;
2428 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2430 return (MonoInst*)call;
/* Convenience wrapper around mono_emit_imt_method_call () with no explicit
 * IMT argument (imt_arg = NULL). */
2433 static inline MonoInst*
2434 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2435 MonoInst **args, MonoInst *this)
2437 return mono_emit_imt_method_call (cfg, method, sig, args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * Builds the call instruction via mono_emit_call_args () (non-virtual,
 * non-tail) and appends it to the current basic block.
 * NOTE(review): intermediate lines are elided in this extract; the setup
 * of the call target between argument marshaling and MONO_ADD_INS is not
 * visible here.
 */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for the native address FUNC.
 * Looks up the icall info by address and calls through the icall's
 * wrapper so the usual wrapper bookkeeping applies.
 */
2456 inline static MonoInst*
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * get_memcpy_method:
 * Return the corlib String.memcpy(dest, src, count) helper, caching the
 * lookup in a function-local static. Aborts with g_error if the helper
 * is missing, which indicates a corlib too old for this runtime.
 */
2467 get_memcpy_method (void)
2469 static MonoMethod *memcpy_method = NULL;
2470 if (!memcpy_method) {
2471 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2473 g_error ("Old corlib found. Install a new one");
2475 return memcpy_method;
2479 * Emit code to copy a valuetype of type @klass whose address is stored in
2480 * @src->dreg to memory whose address is stored at @dest->dreg.
2483 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2485 MonoInst *iargs [3];
2488 MonoMethod *memcpy_method;
2492 * This check breaks with spilled vars... need to handle it during verification anyway.
2493 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* @native selects the unmanaged (marshaled) layout size vs the managed value size */
2497 n = mono_class_native_size (klass, &align);
2499 n = mono_class_value_size (klass, &align);
/* small copies: inline as a memcpy IR sequence instead of a helper call */
2501 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2502 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2503 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* fallback: call the corlib memcpy helper with (dest, src, n) */
2507 EMIT_NEW_ICONST (cfg, iargs [2], n);
2509 memcpy_method = get_memcpy_method ();
2510 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
/*
 * get_memset_method:
 * Return the corlib String.memset(ptr, value, count) helper, caching the
 * lookup in a function-local static. Aborts with g_error if the helper
 * is missing (corlib/runtime version mismatch).
 */
2515 get_memset_method (void)
2517 static MonoMethod *memset_method = NULL;
2518 if (!memset_method) {
2519 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2521 g_error ("Old corlib found. Install a new one");
2523 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code zeroing a valuetype of type @klass at the address in
 * @dest->dreg (the CIL 'initobj' semantics). Small types are zeroed
 * with an inline memset IR sequence; larger ones call the corlib
 * memset helper with (dest, 0, n).
 */
2527 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2529 MonoInst *iargs [3];
2532 MonoMethod *memset_method;
2534 /* FIXME: Optimize this for the case when dest is an LDADDR */
2536 mono_class_init (klass);
2537 n = mono_class_value_size (klass, &align);
/* inline threshold: up to 5 pointer-sized words */
2539 if (n <= sizeof (gpointer) * 5) {
2540 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2543 memset_method = get_memset_method ();
2545 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2546 EMIT_NEW_ICONST (cfg, iargs [2], n);
2547 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that produces the runtime generic context (rgctx) for the
 * shared METHOD, depending on how the generic context is used:
 *  - method context used: load the MRGCTX from the vtable variable;
 *  - static method: load the vtable var (and, for inflated methods with
 *    a method inst, dereference MonoMethodRuntimeGenericContext.class_vtable);
 *  - otherwise: load 'this' (arg 0) and read its MonoObject vtable.
 * Valuetype methods are not supported here (asserted).
 */
2552 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2554 MonoInst *this = NULL;
2556 g_assert (!method->klass->valuetype);
/* non-static methods that don't use the method context get the rgctx via 'this' */
2558 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
2559 EMIT_NEW_ARGLOAD (cfg, this, 0);
2561 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2562 MonoInst *mrgctx_loc, *mrgctx_var;
2565 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2567 mrgctx_loc = mono_get_vtable_var (cfg);
2568 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2571 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2572 MonoInst *vtable_loc, *vtable_var;
2576 vtable_loc = mono_get_vtable_var (cfg);
2577 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2579 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* vtable var actually holds an MRGCTX here; fetch the class vtable from it */
2580 MonoInst *mrgctx_var = vtable_var;
2583 vtable_reg = alloc_preg (cfg);
2584 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2585 vtable_var->type = STACK_PTR;
2591 int vtable_reg, res_reg;
2593 vtable_reg = alloc_preg (cfg);
2594 res_reg = alloc_preg (cfg);
/* instance case: the rgctx is reached through this->vtable */
2595 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Convenience macro: bail out of generic sharing for valuetype methods,
 * then materialize the rgctx for the current method into (rgctx). */
2600 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2601 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2602 (rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * emit_get_rgctx_other_table_ptr:
 * Emit a call to the lazy-fetch trampoline specialized for SLOT, passing
 * the rgctx pointer; the trampoline returns the slot's contents, filling
 * it on first use.
 */
2606 emit_get_rgctx_other_table_ptr (MonoCompile *cfg, MonoInst *rgc_ptr, int slot)
2608 MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
2609 guint8 *tramp = mini_create_rgctx_lazy_fetch_trampoline (slot);
2611 return mono_emit_native_call (cfg, tramp, sig, &rgc_ptr);
/*
 * emit_get_rgctx_klass:
 * Register (or look up) an rgctx slot holding info of RGCTX_TYPE for
 * KLASS, then emit the lazy fetch of that slot through RGCTX.
 */
2615 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2616 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2618 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2619 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, &klass->byval_arg, rgctx_type, cfg->generic_context);
2621 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method:
 * Same as emit_get_rgctx_klass but the slot describes CMETHOD
 * (e.g. its compiled code address, depending on RGCTX_TYPE).
 */
2625 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2626 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2628 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2629 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, cmethod, rgctx_type, cfg->generic_context);
2631 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_field:
 * Same as emit_get_rgctx_klass but the slot describes FIELD.
 */
2635 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2636 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2638 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2639 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, field, rgctx_type, cfg->generic_context);
2641 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method_rgctx:
 * Fetch the slot holding RGCTX_METHOD's own method rgctx
 * (MONO_RGCTX_INFO_METHOD_RGCTX) through RGCTX.
 */
2645 emit_get_rgctx_method_rgctx (MonoCompile *cfg, int context_used,
2646 MonoInst *rgctx, MonoMethod *rgctx_method)
2648 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2649 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, rgctx_method,
2650 MONO_RGCTX_INFO_METHOD_RGCTX, cfg->generic_context);
2652 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
2656 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
/* Emits a call to KLASS's Unbox(object) helper. Under generic sharing
 * (context_used != 0, per the visible rgctx branch) the target address is
 * fetched from the rgctx and called indirectly; otherwise a direct call
 * is emitted. AOT is disabled because the method reference cannot be
 * encoded in AOT images. */
2660 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
2662 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2663 // Can't encode method ref
2664 cfg->disable_aot = TRUE;
2667 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2668 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2670 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2672 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
/*
 * handle_unbox:
 * Emit the CIL 'unbox' sequence for the object on top of the stack
 * (sp [0]): check that the object's vtable rank is 0 and that its
 * element class matches KLASS (via rgctx under generic sharing), throwing
 * InvalidCastException otherwise, then compute the address of the boxed
 * payload (obj + sizeof (MonoObject)) as a STACK_MP value.
 */
2677 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2681 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2682 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2683 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2684 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2686 obj_reg = sp [0]->dreg;
2687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2690 /* FIXME: generics */
2691 g_assert (klass->rank == 0);
/* an array (rank != 0) can never unbox to a valuetype */
2694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2695 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* shared generic code: compare against the element class fetched from the rgctx */
2701 MonoInst *element_class;
2703 /* This assertion is from the unboxcast insn */
2704 g_assert (klass->rank == 0);
2706 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2707 klass->element_class, MONO_RGCTX_INFO_KLASS);
2709 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2710 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2712 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* result: pointer just past the MonoObject header, i.e. the boxed data */
2715 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2716 MONO_ADD_INS (cfg->cbb, add);
2717 add->type = STACK_MP;
/*
 * handle_alloc:
 * Emit object allocation for KLASS, picking the cheapest strategy:
 *  - MONO_OPT_SHARED: generic mono_object_new (domain, klass) icall;
 *  - AOT out-of-line corlib classes: a compact helper keyed by the
 *    type token (common in throw paths, avoids relocations);
 *  - a GC-provided managed allocator when available;
 *  - otherwise the allocation function chosen by
 *    mono_class_get_allocation_ftn (optionally passing the instance
 *    size in pointer-words when pass_lw is set).
 * @for_box: the allocation is for a box operation (may select a
 * specialized allocator).
 */
2724 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2726 MonoInst *iargs [2];
2729 if (cfg->opt & MONO_OPT_SHARED) {
2730 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2731 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2733 alloc_ftn = mono_object_new;
2734 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2735 /* This happens often in argument checking code, eg. throw new FooException... */
2736 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2737 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2738 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2740 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2741 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2744 if (managed_alloc) {
2745 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2746 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, NULL);
2748 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: first argument is the instance size rounded up to pointer words */
2750 guint32 lw = vtable->klass->instance_size;
2751 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2752 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2753 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2756 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2760 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 * Like handle_alloc, but the vtable is only known at run time and is
 * provided by DATA_INST (generic sharing). Falls back to
 * mono_object_new_specific; the managed-allocator fast path is disabled
 * (see the FIXME: the class is open, so no vtable is available at
 * compile time). Not supported under AOT (asserted).
 */
2764 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2767 MonoInst *iargs [2];
2768 MonoMethod *managed_alloc = NULL;
2772 FIXME: we cannot get managed_alloc here because we can't get
2773 the class's vtable (because it's not a closed class)
2775 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2776 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2779 if (cfg->opt & MONO_OPT_SHARED) {
2780 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2781 iargs [1] = data_inst;
2782 alloc_ftn = mono_object_new;
2784 g_assert (!cfg->compile_aot);
2786 if (managed_alloc) {
2787 iargs [0] = data_inst;
2788 return mono_emit_method_call (cfg, managed_alloc,
2789 mono_method_signature (managed_alloc), iargs, NULL);
2792 iargs [0] = data_inst;
2793 alloc_ftn = mono_object_new_specific;
2796 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 * Emit the CIL 'box' sequence for VAL of type KLASS: Nullable<T> is
 * boxed by calling its Box(T) helper (AOT disabled: the method ref
 * cannot be encoded); otherwise allocate the object and store the value
 * right after the MonoObject header.
 */
2800 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2802 MonoInst *alloc, *ins;
2804 if (mono_class_is_nullable (klass)) {
2805 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2806 // Can't encode method ref
2807 cfg->disable_aot = TRUE;
2808 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
2811 alloc = handle_alloc (cfg, klass, TRUE);
2813 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 * Box VAL using a runtime-provided vtable (DATA_INST) for shared generic
 * code. Nullable<T> must be handled elsewhere (asserted).
 */
2819 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, MonoInst *data_inst)
2821 MonoInst *alloc, *ins;
2823 g_assert (!mono_class_is_nullable (klass));
2825 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2827 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 * Emit the CIL 'castclass' check for SRC: NULL passes through; otherwise
 * the object's type is checked against KLASS (interface cast for
 * interfaces, vtable/class compare fast path for sealed classes,
 * mini_emit_castclass otherwise) and InvalidCastException is thrown on
 * mismatch. With --debug=casts, the from/to classes are recorded in TLS
 * before the check and cleared afterwards so failures can be reported
 * in detail.
 */
2833 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2835 MonoBasicBlock *is_null_bb;
2836 int obj_reg = src->dreg;
2837 int vtable_reg = alloc_preg (cfg);
2839 NEW_BBLOCK (cfg, is_null_bb);
/* null references always cast successfully */
2841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2842 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2844 if (mini_get_debug_options ()->better_cast_details) {
2845 int to_klass_reg = alloc_preg (cfg);
2846 int klass_reg = alloc_preg (cfg);
2847 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2850 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* record the source and destination classes in the JIT TLS area */
2854 MONO_ADD_INS (cfg->cbb, tls_get);
2855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2859 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2863 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2865 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2867 int klass_reg = alloc_preg (cfg);
2869 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* sealed non-array class: a single vtable (or class) pointer compare suffices */
2871 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2872 /* the remoting code is broken, access the class for now */
2874 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2877 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2880 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2882 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2883 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2887 MONO_START_BB (cfg, is_null_bb);
2889 /* Reset the variables holding the cast details */
2890 if (mini_get_debug_options ()->better_cast_details) {
2891 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2893 MONO_ADD_INS (cfg->cbb, tls_get);
2894 /* It is enough to reset the from field */
2895 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 * Emit the CIL 'isinst' test for SRC against KLASS. Control flow uses
 * three blocks: is_null_bb (success: copy the object reference to the
 * result), false_bb (failure: result 0) and end_bb (join). NULL input
 * goes straight to is_null_bb, matching isinst semantics (null in,
 * null out). Interfaces, arrays (with special enum/object element-class
 * cases and an SZARRAY bounds check), nullables and sealed classes each
 * get a dedicated test sequence.
 */
2902 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2905 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2906 int obj_reg = src->dreg;
2907 int vtable_reg = alloc_preg (cfg);
2908 int res_reg = alloc_preg (cfg);
2910 NEW_BBLOCK (cfg, is_null_bb);
2911 NEW_BBLOCK (cfg, false_bb);
2912 NEW_BBLOCK (cfg, end_bb);
2914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2917 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2919 /* the is_null_bb target simply copies the input register to the output */
2920 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2922 int klass_reg = alloc_preg (cfg);
2924 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* array case: the object's rank must match, then its element class is tested */
2927 int rank_reg = alloc_preg (cfg);
2928 int eclass_reg = alloc_preg (cfg);
2930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2931 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2933 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] accepts any reference element; enums need extra care since
 * an enum[] is castable to its underlying-type array and vice versa */
2935 if (klass->cast_class == mono_defaults.object_class) {
2936 int parent_reg = alloc_preg (cfg);
2937 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2938 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2939 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2940 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2941 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2942 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2943 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2945 } else if (klass->cast_class == mono_defaults.enum_class) {
2946 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2948 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2949 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2951 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2952 /* Check that the object is a vector too */
2953 int bounds_reg = alloc_preg (cfg);
2954 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2959 /* the is_null_bb target simply copies the input register to the output */
2960 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2962 } else if (mono_class_is_nullable (klass)) {
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2964 /* the is_null_bb target simply copies the input register to the output */
2965 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2967 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2968 /* the remoting code is broken, access the class for now */
2970 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2976 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2980 /* the is_null_bb target simply copies the input register to the output */
2981 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
2986 MONO_START_BB (cfg, false_bb);
2988 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2991 MONO_START_BB (cfg, is_null_bb);
2993 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2994 ins->type = STACK_OBJ;
2997 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit the remoting-aware isinst used by transparent proxies.
 * Returns an I4 result (see the tri-state contract below). Proxies with
 * custom type info cannot be decided at JIT-emitted-code level, hence
 * the distinct '2' result which lets the caller fall back to a managed
 * check.
 */
3005 /* This opcode takes as input an object reference and a class, and returns:
3006 0) if the object is an instance of the class,
3007 1) if the object is not instance of the class,
3008 2) if the object is a proxy whose type cannot be determined */
3011 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3012 int obj_reg = src->dreg;
3013 int dreg = alloc_ireg (cfg);
3015 int klass_reg = alloc_preg (cfg);
3017 NEW_BBLOCK (cfg, true_bb);
3018 NEW_BBLOCK (cfg, false_bb);
3019 NEW_BBLOCK (cfg, false2_bb);
3020 NEW_BBLOCK (cfg, end_bb);
3021 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
3023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3026 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3027 NEW_BBLOCK (cfg, interface_fail_bb);
3029 tmp_reg = alloc_preg (cfg);
3030 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3031 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3032 MONO_START_BB (cfg, interface_fail_bb);
3033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* iface test failed: only a transparent proxy can still produce '2' */
3035 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3037 tmp_reg = alloc_preg (cfg);
3038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3040 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3042 tmp_reg = alloc_preg (cfg);
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* non-interface: proxies are tested against their remote proxy_class */
3046 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3047 tmp_reg = alloc_preg (cfg);
3048 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3051 tmp_reg = alloc_preg (cfg);
3052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3053 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3056 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3059 MONO_START_BB (cfg, no_proxy_bb);
3061 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* result materialization: 1 = not instance, 2 = undecidable proxy, 0 = instance */
3064 MONO_START_BB (cfg, false_bb);
3066 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3067 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3069 MONO_START_BB (cfg, false2_bb);
3071 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3074 MONO_START_BB (cfg, true_bb);
3076 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3078 MONO_START_BB (cfg, end_bb);
3081 MONO_INST_NEW (cfg, ins, OP_ICONST);
3083 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit the remoting-aware castclass used by transparent proxies.
 * Returns an I4 result (contract below); non-proxy objects that fail
 * the check throw InvalidCastException immediately via the emitted
 * conditional-exception instructions.
 */
3091 /* This opcode takes as input an object reference and a class, and returns:
3092 0) if the object is an instance of the class,
3093 1) if the object is a proxy whose type cannot be determined
3094 an InvalidCastException exception is thrown otherwhise*/
3097 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3098 int obj_reg = src->dreg;
3099 int dreg = alloc_ireg (cfg);
3100 int tmp_reg = alloc_preg (cfg);
3101 int klass_reg = alloc_preg (cfg);
3103 NEW_BBLOCK (cfg, end_bb);
3104 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0) */
3106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3109 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3110 NEW_BBLOCK (cfg, interface_fail_bb);
3112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3113 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3114 MONO_START_BB (cfg, interface_fail_bb);
3115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* iface test failed: throws unless the object is a proxy with custom type info */
3117 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3119 tmp_reg = alloc_preg (cfg);
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3122 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3124 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3128 NEW_BBLOCK (cfg, no_proxy_bb);
3130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3132 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* proxy path: test against the remote class's proxy_class */
3134 tmp_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3138 tmp_reg = alloc_preg (cfg);
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3143 NEW_BBLOCK (cfg, fail_1_bb);
3145 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3147 MONO_START_BB (cfg, fail_1_bb);
3149 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3152 MONO_START_BB (cfg, no_proxy_bb);
/* plain object: a failing castclass here throws directly */
3154 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3157 MONO_START_BB (cfg, ok_result_bb);
3159 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3161 MONO_START_BB (cfg, end_bb);
3164 MONO_INST_NEW (cfg, ins, OP_ICONST);
3166 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor (): allocate the delegate
 * object, store its target, method and invoke_impl fields, and (when
 * not AOT-compiling and the method is not dynamic) attach a per-domain
 * code slot that is filled once the target method is compiled, so the
 * delegate trampoline can skip the code lookup.
 */
3171 static G_GNUC_UNUSED MonoInst*
3172 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3174 gpointer *trampoline;
3175 MonoInst *obj, *method_ins, *tramp_ins;
3179 obj = handle_alloc (cfg, klass, FALSE);
3181 /* Inline the contents of mono_delegate_ctor */
3183 /* Set target field */
3184 /* Optimize away setting of NULL target */
3185 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3186 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3188 /* Set method field */
3189 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3190 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3193 * To avoid looking up the compiled code belonging to the target method
3194 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3195 * store it, and we fill it after the method has been compiled.
3197 if (!cfg->compile_aot && !method->dynamic) {
3198 MonoInst *code_slot_ins;
3200 domain = mono_domain_get ();
/* the method_code_hash is created lazily and shared per domain; lock while touching it */
3201 mono_domain_lock (domain);
3202 if (!domain->method_code_hash)
3203 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3204 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3206 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3207 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3209 mono_domain_unlock (domain);
3211 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3212 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3215 /* Set invoke_impl field */
3216 trampoline = mono_create_delegate_trampoline (klass);
3217 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3218 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3220 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a call to the vararg array-new icall for RANK dimensions,
 * passing the dimension arguments in SP. Registers the icall first so
 * it gets an icall wrapper, and marks the method as using varargs.
 */
3226 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3228 MonoJitICallInfo *info;
3230 /* Need to register the icall so it gets an icall wrapper */
3231 info = mono_get_array_new_va_icall (rank);
3233 cfg->flags |= MONO_CFG_HAS_VARARGS;
3235 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3236 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * If the method has a GOT variable that is not yet materialized, emit
 * an OP_LOAD_GOTADDR at the very start of the entry basic block
 * (prepending it before any existing code) and a dummy use in the exit
 * block so liveness keeps the variable alive across the whole method,
 * since real uses may only appear during back-end lowering.
 */
3240 mono_emit_load_got_addr (MonoCompile *cfg)
3242 MonoInst *getaddr, *dummy_use;
3244 if (!cfg->got_var || cfg->got_var_allocated)
3247 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3248 getaddr->dreg = cfg->got_var->dreg;
3250 /* Add it to the start of the first bblock */
3251 if (cfg->bb_entry->code) {
3252 getaddr->next = cfg->bb_entry->code;
3253 cfg->bb_entry->code = getaddr;
3256 MONO_ADD_INS (cfg->bb_entry, getaddr);
3258 cfg->got_var_allocated = TRUE;
3261 * Add a dummy use to keep the got_var alive, since real uses might
3262 * only be generated by the back ends.
3263 * Add it to end_bblock, so the variable's lifetime covers the whole
3265 * It would be better to make the usage of the got var explicit in all
3266 * cases when the backend needs it (i.e. calls, throw etc.), so this
3267 * wouldn't be needed.
3269 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3270 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* True if the IL opcode at *ip is any store-local form (stloc.0-3 or stloc.s). */
3273 #define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: generic-sharing compilations; runtime/internal-call/noinline/
 * synchronized/pinvoke methods; marshal-by-ref classes; methods with no
 * header or with exception clauses; bodies above the size limit
 * (MONO_INLINELIMIT env override, else INLINE_LENGTH_LIMIT); classes
 * whose cctor cannot be proven to have run (or be safely runnable) at
 * this point; methods with declarative security (CAS); and, on
 * soft-float targets, any R4 in the signature.
 */
3276 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3278 MonoMethodHeader *header = mono_method_get_header (method);
3280 #ifdef MONO_ARCH_SOFT_FLOAT
3281 MonoMethodSignature *sig = mono_method_signature (method);
3285 if (cfg->generic_sharing_context)
3288 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* with LMF ops, small icall/pinvoke methods can be inlined despite the flags */
3289 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3290 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3291 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3295 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3296 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3297 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3298 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3299 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3300 (method->klass->marshalbyref) ||
3301 !header || header->num_clauses)
3304 /* also consider num_locals? */
3305 /* Do the size check early to avoid creating vtables */
3306 if (getenv ("MONO_INLINELIMIT")) {
3307 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3310 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3314 * if we can initialize the class of the method right away, we do,
3315 * otherwise we don't allow inlining if the class needs initialization,
3316 * since it would mean inserting a call to mono_runtime_class_init()
3317 * inside the inlined code
3319 if (!(cfg->opt & MONO_OPT_SHARED)) {
3320 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3321 if (cfg->run_cctors && method->klass->has_cctor) {
3322 if (!method->klass->runtime_info)
3323 /* No vtable created yet */
3325 vtable = mono_class_vtable (cfg->domain, method->klass);
3328 /* This makes so that inline cannot trigger */
3329 /* .cctors: too many apps depend on them */
3330 /* running with a specific order... */
3331 if (! vtable->initialized)
3333 mono_runtime_class_init (vtable);
3335 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3336 if (!method->klass->runtime_info)
3337 /* No vtable created yet */
3339 vtable = mono_class_vtable (cfg->domain, method->klass);
3342 if (!vtable->initialized)
3347 * If we're compiling for shared code
3348 * the cctor will need to be run at aot method load time, for example,
3349 * or at the end of the compilation of the inlining method.
3351 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3356 * CAS - do not inline methods with declarative security
3357 * Note: this has to be before any possible return TRUE;
3359 if (mono_method_has_declsec (method))
3362 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float: R4 arguments/returns need conversion helpers, so don't inline */
3364 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3366 for (i = 0; i < sig->param_count; ++i)
3367 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access from METHOD requires the class
 * constructor of VTABLE's class to be triggered. No trigger is needed
 * when the vtable is already initialized (JIT only), for beforefieldinit
 * classes, when no cctor run is needed at all, or when accessing one's
 * own class from an instance method (the cctor ran before the call).
 */
3375 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3377 if (vtable->initialized && !cfg->compile_aot)
3380 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3383 if (!mono_class_needs_cctor_run (vtable->klass, method))
3386 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3387 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit the address of ARR [INDEX] for a one-dimensional array of KLASS
 * elements: sign-extend the 32-bit index on 64-bit targets, bounds-check
 * against MonoArray.max_length, then compute
 * &arr->vector + index * element_size. On x86/amd64 a single LEA with a
 * shifted index is used for power-of-two element sizes.
 */
3394 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3398 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3400 mono_class_init (klass);
3401 size = mono_class_array_element_size (klass);
3403 mult_reg = alloc_preg (cfg);
3404 array_reg = arr->dreg;
3405 index_reg = index->dreg;
3407 #if SIZEOF_VOID_P == 8
3408 /* The array reg is 64 bits but the index reg is only 32 */
3409 index2_reg = alloc_preg (cfg);
3410 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3412 index2_reg = index_reg;
3415 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3417 #if defined(__i386__) || defined(__x86_64__)
3418 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* maps element size -> shift amount for the LEA scale */
3419 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3421 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3422 ins->type = STACK_PTR;
/* generic path: explicit multiply + add + vector-offset */
3428 add_reg = alloc_preg (cfg);
3430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3432 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3433 ins->type = STACK_PTR;
3434 MONO_ADD_INS (cfg->cbb, ins);
3439 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [index1, index2] in a rank-2
 * array, including lower-bound adjustment and per-dimension range checks
 * against the MonoArrayBounds records.  Only compiled when the target
 * does not emulate mul/div (the sequence relies on real multiply ops).
 */
3441 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3443 int bounds_reg = alloc_preg (cfg);
3444 int add_reg = alloc_preg (cfg);
3445 int mult_reg = alloc_preg (cfg);
3446 int mult2_reg = alloc_preg (cfg);
3447 int low1_reg = alloc_preg (cfg);
3448 int low2_reg = alloc_preg (cfg);
3449 int high1_reg = alloc_preg (cfg);
3450 int high2_reg = alloc_preg (cfg);
3451 int realidx1_reg = alloc_preg (cfg);
3452 int realidx2_reg = alloc_preg (cfg);
3453 int sum_reg = alloc_preg (cfg);
3458 mono_class_init (klass);
3459 size = mono_class_array_element_size (klass);
3461 index1 = index_ins1->dreg;
3462 index2 = index_ins2->dreg;
3464 /* range checking */
/* Load the pointer to the bounds array (one MonoArrayBounds per dim). */
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3466 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned-compare against
 * length so a negative adjusted index also faults. */
3468 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3469 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3470 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3472 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3473 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3474 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check using the second MonoArrayBounds record
 * (offset by sizeof (MonoArrayBounds)). */
3476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3477 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3478 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3480 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3481 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3482 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * dim1_length + realidx2) * size + vector offset. */
3484 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3487 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3488 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3490 ins->type = STACK_MP;
3492 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Front-end for emitting an array-element-address computation for the
 * Get/Set/Address helpers on multi-dimensional arrays.  Fast paths cover
 * rank 1 (and rank 2 when intrinsics are enabled); otherwise the call is
 * routed through a marshalling wrapper that computes the address.
 */
3499 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3503 MonoMethod *addr_method;
/* For setters the last parameter is the value, not an index. */
3506 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank-1 fast path (the `if (rank == 1)` guard is elided in this extract). */
3509 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3511 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3512 /* emit_ldelema_2 depends on OP_LMUL */
3513 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3514 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address wrapper. */
3518 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3519 addr_method = mono_marshal_get_array_address (rank, element_size);
3520 addr = mono_emit_method_call (cfg, addr_method, addr_method->signature, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Intrinsic expansion: recognize well-known corlib methods (String,
 * Object, Array, RuntimeHelpers, Thread, Interlocked, Debugger, Math...)
 * and emit inline IR instead of a call.  Returns the result MonoInst, or
 * NULL (elided here) when the method has no intrinsic form.
 * NOTE(review): many return statements, braces and #else arms are elided
 * in this extract; verify control flow against the full sources.
 */
3526 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3528 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3530 static MonoClass *runtime_helpers_class = NULL;
3531 if (! runtime_helpers_class)
3532 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3533 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3535 if (cmethod->klass == mono_defaults.string_class) {
/* get_Chars: bounds-checked 16-bit load from the chars array. */
3536 if (strcmp (cmethod->name, "get_Chars") == 0) {
3537 int dreg = alloc_ireg (cfg);
3538 int index_reg = alloc_preg (cfg);
3539 int mult_reg = alloc_preg (cfg);
3540 int add_reg = alloc_preg (cfg);
3542 #if SIZEOF_VOID_P == 8
3543 /* The array reg is 64 bits but the index reg is only 32 */
3544 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3546 index_reg = args [1]->dreg;
3548 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3550 #if defined(__i386__) || defined(__x86_64__)
/* x86: fold base + index*2 + offset into one LEA. */
3551 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3552 add_reg = ins->dreg;
3553 /* Avoid a warning */
3555 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: shift-by-1 for the 2-byte char stride. */
3558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3559 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3560 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3561 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3563 type_from_op (ins, NULL, NULL);
/* get_Length: direct load of the cached length field. */
3565 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3566 int dreg = alloc_ireg (cfg);
3567 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3568 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3569 type_from_op (ins, NULL, NULL);
/* InternalSetChar: raw 16-bit store; bounds are checked by the caller. */
3572 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3573 int mult_reg = alloc_preg (cfg);
3574 int add_reg = alloc_preg (cfg);
3576 /* The corlib functions check for oob already. */
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3578 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3579 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- System.Object intrinsics ---- */
3582 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: obj->vtable->type, no call needed. */
3584 if (strcmp (cmethod->name, "GetType") == 0) {
3585 int dreg = alloc_preg (cfg);
3586 int vt_reg = alloc_preg (cfg);
3587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3588 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3589 type_from_op (ins, NULL, NULL);
/* InternalGetHashCode: address-based hash; invalid with a moving GC,
 * hence the HAVE_MOVING_COLLECTOR guard. */
3592 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3593 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3594 int dreg = alloc_ireg (cfg);
3595 int t1 = alloc_ireg (cfg);
/* Knuth-style multiplicative hash of the (shifted) object address. */
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3598 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3599 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
3603 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3604 MONO_INST_NEW (cfg, ins, OP_NOP);
3605 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
3609 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap first-character filter before the strcmp chain. */
3610 if (cmethod->name [0] != 'g')
3613 if (strcmp (cmethod->name, "get_Rank") == 0) {
3614 int dreg = alloc_ireg (cfg);
3615 int vtable_reg = alloc_preg (cfg);
3616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3617 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3619 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3620 type_from_op (ins, NULL, NULL);
3623 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3624 int dreg = alloc_ireg (cfg);
3626 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3627 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3628 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
3633 } else if (cmethod->klass == runtime_helpers_class) {
3635 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3636 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
3640 } else if (cmethod->klass == mono_defaults.thread_class) {
/* get_CurrentThread: only when the arch provides a TLS intrinsic. */
3641 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3642 ins->dreg = alloc_preg (cfg);
3643 ins->type = STACK_OBJ;
3644 MONO_ADD_INS (cfg->cbb, ins);
3646 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3647 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3648 MONO_ADD_INS (cfg->cbb, ins);
/* Array::GetGenericValueImpl: load element and store it to the out arg. */
3651 } else if (mini_class_is_system_array (cmethod->klass) &&
3652 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3653 MonoInst *addr, *store, *load;
3654 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3656 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3657 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked ---- */
3660 } else if (cmethod->klass->image == mono_defaults.corlib &&
3661 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3662 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3665 #if SIZEOF_VOID_P == 8
3666 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3667 /* 64 bit reads are already atomic */
3668 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3669 ins->dreg = mono_alloc_preg (cfg);
3670 ins->inst_basereg = args [0]->dreg;
3671 ins->inst_offset = 0;
3672 MONO_ADD_INS (cfg->cbb, ins);
3676 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add map onto OP_ATOMIC_ADD_NEW_* with an
 * immediate (+1/-1) or the caller-supplied addend. */
3677 if (strcmp (cmethod->name, "Increment") == 0) {
3678 MonoInst *ins_iconst;
3681 if (fsig->params [0]->type == MONO_TYPE_I4)
3682 opcode = OP_ATOMIC_ADD_NEW_I4;
3683 #if SIZEOF_VOID_P == 8
3684 else if (fsig->params [0]->type == MONO_TYPE_I8)
3685 opcode = OP_ATOMIC_ADD_NEW_I8;
3688 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3689 ins_iconst->inst_c0 = 1;
3690 ins_iconst->dreg = mono_alloc_ireg (cfg);
3691 MONO_ADD_INS (cfg->cbb, ins_iconst);
3693 MONO_INST_NEW (cfg, ins, opcode);
3694 ins->dreg = mono_alloc_ireg (cfg);
3695 ins->inst_basereg = args [0]->dreg;
3696 ins->inst_offset = 0;
3697 ins->sreg2 = ins_iconst->dreg;
3698 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3699 MONO_ADD_INS (cfg->cbb, ins);
3701 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3702 MonoInst *ins_iconst;
3705 if (fsig->params [0]->type == MONO_TYPE_I4)
3706 opcode = OP_ATOMIC_ADD_NEW_I4;
3707 #if SIZEOF_VOID_P == 8
3708 else if (fsig->params [0]->type == MONO_TYPE_I8)
3709 opcode = OP_ATOMIC_ADD_NEW_I8;
3712 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3713 ins_iconst->inst_c0 = -1;
3714 ins_iconst->dreg = mono_alloc_ireg (cfg);
3715 MONO_ADD_INS (cfg->cbb, ins_iconst);
3717 MONO_INST_NEW (cfg, ins, opcode);
3718 ins->dreg = mono_alloc_ireg (cfg);
3719 ins->inst_basereg = args [0]->dreg;
3720 ins->inst_offset = 0;
3721 ins->sreg2 = ins_iconst->dreg;
3722 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3723 MONO_ADD_INS (cfg->cbb, ins);
3725 } else if (strcmp (cmethod->name, "Add") == 0) {
3728 if (fsig->params [0]->type == MONO_TYPE_I4)
3729 opcode = OP_ATOMIC_ADD_NEW_I4;
3730 #if SIZEOF_VOID_P == 8
3731 else if (fsig->params [0]->type == MONO_TYPE_I8)
3732 opcode = OP_ATOMIC_ADD_NEW_I8;
3736 MONO_INST_NEW (cfg, ins, opcode);
3737 ins->dreg = mono_alloc_ireg (cfg);
3738 ins->inst_basereg = args [0]->dreg;
3739 ins->inst_offset = 0;
3740 ins->sreg2 = args [1]->dreg;
/* NOTE(review): this compares against OP_ATOMIC_ADD_I4 while the
 * opcode assigned above is OP_ATOMIC_ADD_NEW_I4 — looks inconsistent
 * with the Increment/Decrement branches; confirm against upstream. */
3741 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3742 MONO_ADD_INS (cfg->cbb, ins);
3745 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3747 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3748 if (strcmp (cmethod->name, "Exchange") == 0) {
/* Select exchange width from the first parameter's type; pointers and
 * object references use the native word size. */
3751 if (fsig->params [0]->type == MONO_TYPE_I4)
3752 opcode = OP_ATOMIC_EXCHANGE_I4;
3753 #if SIZEOF_VOID_P == 8
3754 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3755 (fsig->params [0]->type == MONO_TYPE_I) ||
3756 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3757 opcode = OP_ATOMIC_EXCHANGE_I8;
3759 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3760 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3761 opcode = OP_ATOMIC_EXCHANGE_I4;
3766 MONO_INST_NEW (cfg, ins, opcode);
3767 ins->dreg = mono_alloc_ireg (cfg);
3768 ins->inst_basereg = args [0]->dreg;
3769 ins->inst_offset = 0;
3770 ins->sreg2 = args [1]->dreg;
3771 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type of the result mirrors the parameter type (case labels
 * partially elided in this extract). */
3773 switch (fsig->params [0]->type) {
3775 ins->type = STACK_I4;
3779 ins->type = STACK_I8;
3781 case MONO_TYPE_OBJECT:
3782 ins->type = STACK_OBJ;
3785 g_assert_not_reached ();
3788 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3790 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3792 * Can't implement CompareExchange methods this way since they have
3793 * three arguments. We can implement one of the common cases, where the new
3794 * value is a constant.
3796 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3797 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3798 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3799 ins->dreg = alloc_ireg (cfg);
3800 ins->sreg1 = args [0]->dreg;
3801 ins->sreg2 = args [1]->dreg;
3802 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3803 ins->type = STACK_I4;
3804 MONO_ADD_INS (cfg->cbb, ins);
3806 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3808 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
3812 } else if (cmethod->klass->image == mono_defaults.corlib) {
3813 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3814 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3815 MONO_INST_NEW (cfg, ins, OP_BREAK);
3816 MONO_ADD_INS (cfg->cbb, ins);
/* Platform check folds to a compile-time constant. */
3819 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3820 && strcmp (cmethod->klass->name, "Environment") == 0) {
3821 #ifdef PLATFORM_WIN32
3822 EMIT_NEW_ICONST (cfg, ins, 1);
3824 EMIT_NEW_ICONST (cfg, ins, 0);
3828 } else if (cmethod->klass == mono_defaults.math_class) {
3830 * There is general branches code for Min/Max, but it does not work for
3832 * http://everything2.com/?node_id=1051618
/* Fall through to the arch-specific intrinsic table. */
3836 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3840 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect selected runtime-internal calls to alternate implementations.
 * Currently handles String.InternalAllocateStr by substituting the
 * GC-provided managed allocator when one is available.  Returns the call
 * instruction for the redirected call (NULL-return path elided here).
 */
3843 inline static MonoInst*
3844 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3845 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3847 if (method->klass == mono_defaults.string_class) {
3848 /* managed string allocation support */
3849 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3850 MonoInst *iargs [2];
3851 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3852 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (string vtable, length) instead of the icall. */
3855 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3856 iargs [1] = args [0];
3857 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, this);
/*
 * mono_save_args:
 * Before inlining, copy each call argument (including `this` when
 * present) into a freshly created local variable so the inlined body
 * reads stable temporaries rather than the caller's stack values.
 */
3864 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp, MonoInst **args)
3866 MonoInst *store, *temp;
3869 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is `this` when hasthis: its type comes from the stack entry,
 * not from the signature's parameter list. */
3870 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3873 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3874 * would be different than the MonoInst's used to represent arguments, and
3875 * the ldelema implementation can't deal with that.
3876 * Solution: When ldelema is used on an inline argument, create a var for
3877 * it, emit ldelema on that var, and emit the saving code below in
3878 * inline_method () if needed.
3880 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3882 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, *sp);
/* Preserve the IL location for debug info on the store. */
3883 store->cil_code = sp [0]->cil_code;
/* Debugging aid: when enabled, inlining can be restricted by method-name
 * prefix via environment variables (see the two checkers below). */
3888 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3889 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3891 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3893 mono_inline_called_method_name_limit = NULL;
/*
 * check_inline_called_method_name_limit:
 * Return TRUE iff the callee's full name starts with the prefix given by
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT (empty prefix = allow all).
 * The limit string is read from the environment once and cached.
 */
3894 static gboolean check_inline_called_method_name_limit (MonoMethod *called_method) {
3895 char *called_method_name = mono_method_full_name (called_method, TRUE);
3898 if (mono_inline_called_method_name_limit == NULL) {
3899 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3900 if (limit_string != NULL) {
3901 mono_inline_called_method_name_limit = limit_string;
/* No env var set: cache an empty prefix so every method matches. */
3903 mono_inline_called_method_name_limit = (char *) "";
/* Prefix comparison: strncmp over the limit's own length. */
3907 strncmp_result = strncmp (called_method_name, mono_inline_called_method_name_limit, strlen (mono_inline_called_method_name_limit));
3908 g_free (called_method_name);
3910 //return (strncmp_result <= 0);
3911 return (strncmp_result == 0);
3915 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3917 mono_inline_caller_method_name_limit = NULL;
/*
 * check_inline_caller_method_name_limit:
 * Mirror of check_inline_called_method_name_limit for the CALLER side:
 * TRUE iff the caller's full name starts with the prefix taken from
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT (cached after the first read).
 */
3918 static gboolean check_inline_caller_method_name_limit (MonoMethod *caller_method) {
3919 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3922 if (mono_inline_caller_method_name_limit == NULL) {
3923 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3924 if (limit_string != NULL) {
3925 mono_inline_caller_method_name_limit = limit_string;
/* No env var set: empty prefix matches everything. */
3927 mono_inline_caller_method_name_limit = (char *) "";
3931 strncmp_result = strncmp (caller_method_name, mono_inline_caller_method_name_limit, strlen (mono_inline_caller_method_name_limit));
3932 g_free (caller_method_name);
3934 //return (strncmp_result <= 0);
3935 return (strncmp_result == 0);
/*
 * inline_method:
 * Attempt to inline `cmethod` at the current emission point.  Saves the
 * per-method compiler state, compiles the callee's IL into fresh bblocks
 * via mono_method_to_ir2, restores the state, and either stitches the
 * inlined blocks into the caller (on success / inline_allways) or throws
 * the new bblocks away (on failure).  Returns the inline cost on success
 * (return statements elided in this extract — confirm).
 */
3940 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3941 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3943 MonoInst *ins, *rvar = NULL;
3944 MonoMethodHeader *cheader;
3945 MonoBasicBlock *ebblock, *sbblock;
3947 MonoMethod *prev_inlined_method;
/* Saved caller-compilation state, restored after the recursive IR build. */
3948 MonoInst **prev_locals, **prev_args;
3949 MonoType **prev_arg_types;
3950 guint prev_real_offset;
3951 GHashTable *prev_cbb_hash;
3952 MonoBasicBlock **prev_cil_offset_to_bb;
3953 MonoBasicBlock *prev_cbb;
3954 unsigned char* prev_cil_start;
3955 guint32 prev_cil_offset_to_bb_len;
3956 MonoMethod *prev_current_method;
3957 MonoGenericContext *prev_generic_context;
3959 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters (debugging aid, see checkers above). */
3961 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3962 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3965 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3966 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3970 if (cfg->verbose_level > 2)
3971 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3973 if (!cmethod->inline_info) {
3974 mono_jit_stats.inlineable_methods++;
3975 cmethod->inline_info = 1;
3977 /* allocate space to store the return value */
3978 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3979 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3982 /* allocate local variables */
3983 cheader = mono_method_get_header (cmethod);
3984 prev_locals = cfg->locals;
3985 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3986 for (i = 0; i < cheader->num_locals; ++i)
3987 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3989 /* allocate start and end blocks */
3990 /* This is needed so if the inline is aborted, we can clean up */
3991 NEW_BBLOCK (cfg, sbblock);
3992 sbblock->real_offset = real_offset;
3994 NEW_BBLOCK (cfg, ebblock);
3995 ebblock->block_num = cfg->num_bblocks++;
3996 ebblock->real_offset = real_offset;
/* Swap in the callee's compilation context. */
3998 prev_args = cfg->args;
3999 prev_arg_types = cfg->arg_types;
4000 prev_inlined_method = cfg->inlined_method;
4001 cfg->inlined_method = cmethod;
4002 cfg->ret_var_set = FALSE;
4003 prev_real_offset = cfg->real_offset;
4004 prev_cbb_hash = cfg->cbb_hash;
4005 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4006 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4007 prev_cil_start = cfg->cil_start;
4008 prev_cbb = cfg->cbb;
4009 prev_current_method = cfg->current_method;
4010 prev_generic_context = cfg->generic_context;
/* Recursively build IR for the callee; costs < 0 means abort. */
4012 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4014 cfg->inlined_method = prev_inlined_method;
4015 cfg->real_offset = prev_real_offset;
4016 cfg->cbb_hash = prev_cbb_hash;
4017 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4018 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4019 cfg->cil_start = prev_cil_start;
4020 cfg->locals = prev_locals;
4021 cfg->args = prev_args;
4022 cfg->arg_types = prev_arg_types;
4023 cfg->current_method = prev_current_method;
4024 cfg->generic_context = prev_generic_context;
/* Success path: cost under threshold (60), or forced inlining. */
4026 if ((costs >= 0 && costs < 60) || inline_allways) {
4027 if (cfg->verbose_level > 2)
4028 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4030 mono_jit_stats.inlined_methods++;
4032 /* always add some code to avoid block split failures */
4033 MONO_INST_NEW (cfg, ins, OP_NOP);
4034 MONO_ADD_INS (prev_cbb, ins);
4036 prev_cbb->next_bb = sbblock;
4037 link_bblock (cfg, prev_cbb, sbblock);
4040 * Get rid of the begin and end bblocks if possible to aid local
4043 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4045 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4046 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4048 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4049 MonoBasicBlock *prev = ebblock->in_bb [0];
4050 mono_merge_basic_blocks (cfg, prev, ebblock);
4058 * If the inlined method contains only a throw, then the ret var is not
4059 * set, so set it to a dummy value.
4061 if (!cfg->ret_var_set) {
4062 static double r8_0 = 0.0;
4064 switch (rvar->type) {
4066 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4069 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4074 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4077 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4078 ins->type = STACK_R8;
4079 ins->inst_p0 = (void*)&r8_0;
4080 ins->dreg = rvar->dreg;
4081 MONO_ADD_INS (cfg->cbb, ins);
4084 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4087 g_assert_not_reached ();
/* Load the return temporary so the caller sees the result on the stack. */
4091 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Failure path: discard partial state and continue with a normal call. */
4096 if (cfg->verbose_level > 2)
4097 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4098 cfg->exception_type = MONO_EXCEPTION_NONE;
4099 mono_loader_clear_error ();
4101 /* This gets rid of the newly added bblocks */
4102 cfg->cbb = prev_cbb;
4108 * Some of these comments may well be out-of-date.
4109 * Design decisions: we do a single pass over the IL code (and we do bblock
4110 * splitting/merging in the few cases when it's required: a back jump to an IL
4111 * address that was not already seen as bblock starting point).
4112 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4113 * Complex operations are decomposed in simpler ones right away. We need to let the
4114 * arch-specific code peek and poke inside this process somehow (except when the
4115 * optimizations can take advantage of the full semantic info of coarse opcodes).
4116 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4117 * MonoInst->opcode initially is the IL opcode or some simplification of that
4118 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4119 * opcode with value bigger than OP_LAST.
4120 * At this point the IR can be handed over to an interpreter, a dumb code generator
4121 * or to the optimizing code generator that will translate it to SSA form.
4123 * Profiling directed optimizations.
4124 * We may compile by default with few or no optimizations and instrument the code
4125 * or the user may indicate what methods to optimize the most either in a config file
4126 * or through repeated runs where the compiler applies offline the optimizations to
4127 * each method and then decides if it was worth it.
/* Verification helpers used throughout the IL-to-IR loop: each macro
 * bails out (UNVERIFIED / load_error) when an IL-level invariant fails. */
4130 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4131 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4132 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4133 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4134 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4135 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4136 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4137 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4139 /* offset from br.s -> br like opcodes */
4140 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * TRUE when the IL address `ip` belongs to basic block `bb` — i.e. no
 * other block starts at that offset in the cil_offset_to_bb map.
 */
4143 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4145 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4147 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * Pre-pass over the IL stream [start, end): decode each opcode, and for
 * every branch/switch create basic blocks at the branch targets and at
 * the fall-through addresses (via GET_BBLOCK).  Also marks the block
 * containing a `throw` as out-of-line so codegen can move it off the
 * hot path.  NOTE(review): several ip-advance statements are elided in
 * this extract; the decode loop is incomplete as shown.
 */
4151 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4153 unsigned char *ip = start;
4154 unsigned char *target;
4157 MonoBasicBlock *bblock;
4158 const MonoOpcode *opcode;
4161 cli_addr = ip - start;
4162 i = mono_opcode_value ((const guint8 **)&ip, end);
4165 opcode = &mono_opcodes [i];
/* Advance over the operand according to its encoding class. */
4166 switch (opcode->argument) {
4167 case MonoInlineNone:
4170 case MonoInlineString:
4171 case MonoInlineType:
4172 case MonoInlineField:
4173 case MonoInlineMethod:
4176 case MonoShortInlineR:
4183 case MonoShortInlineVar:
4184 case MonoShortInlineI:
4187 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement from the next opcode. */
4188 target = start + cli_addr + 2 + (signed char)ip [1];
4189 GET_BBLOCK (cfg, bblock, target);
4192 GET_BBLOCK (cfg, bblock, ip);
4194 case MonoInlineBrTarget:
/* Long branch: 4-byte signed displacement. */
4195 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4196 GET_BBLOCK (cfg, bblock, target);
4199 GET_BBLOCK (cfg, bblock, ip);
4201 case MonoInlineSwitch: {
4202 guint32 n = read32 (ip + 1);
/* Block at the instruction following the jump table... */
4205 cli_addr += 5 + 4 * n;
4206 target = start + cli_addr;
4207 GET_BBLOCK (cfg, bblock, target);
/* ...and one per switch target. */
4209 for (j = 0; j < n; ++j) {
4210 target = start + cli_addr + (gint32)read32 (ip);
4211 GET_BBLOCK (cfg, bblock, target);
4221 g_assert_not_reached ();
4224 if (i == CEE_THROW) {
4225 unsigned char *bb_start = ip - 1;
4227 /* Find the start of the bblock containing the throw */
4229 while ((bb_start >= start) && !bblock) {
4230 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold; keep them out of the main code path. */
4234 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve a method token in the context of method `m`.  Wrapper methods
 * store their referenced methods as wrapper data rather than metadata
 * tokens; otherwise the token is resolved through the image, allowing
 * open (uninstantiated) generic constructions.
 */
4243 static inline MonoMethod *
4244 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4248 if (m->wrapper_type != MONO_WRAPPER_NONE)
4249 return mono_method_get_wrapper_data (m, token);
4251 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when generic sharing is not in
 * effect a method on an open constructed type is rejected (the elided
 * statement presumably NULLs/flags it — confirm against full sources).
 */
4256 static inline MonoMethod *
4257 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4259 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4261 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve a type token to a MonoClass in the context of `method`
 * (wrapper data for wrappers, metadata lookup otherwise) and run class
 * initialization on the result before returning it.
 */
4267 static inline MonoClass*
4268 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4272 if (method->wrapper_type != MONO_WRAPPER_NONE)
4273 klass = mono_method_get_wrapper_data (method, token);
4275 klass = mono_class_get_full (method->klass->image, token, context);
4277 mono_class_init (klass);
4282 * Returns TRUE if the JIT should abort inlining because "callee"
4283 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS link demands for a caller/callee pair.  For an ECMA link
 * demand, emit IR that raises a SecurityException before the call; for
 * other failures, record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 * (unless an earlier exception is pending).
 */
4286 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only relevant when inlining (cfg->method != caller) and the callee
 * carries declarative security metadata. */
4290 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4294 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4295 if (result == MONO_JIT_SECURITY_OK)
4298 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4299 /* Generate code to throw a SecurityException before the actual call/link */
4300 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4303 NEW_ICONST (cfg, args [0], 4);
4304 NEW_METHODCONST (cfg, args [1], caller);
4305 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, mono_method_signature (secman->linkdemandsecurityexception), args, NULL);
4306 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4307 /* don't hide previous results */
4308 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4309 cfg->exception_data = result;
/*
 * method_access_exception:
 * Lazily look up and cache SecurityManager.MethodAccessException(2 args),
 * the managed thrower used when a method-access check fails.
 */
4317 method_access_exception (void)
4319 static MonoMethod *method = NULL;
4322 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4323 method = mono_class_get_method_from_name (secman->securitymanager,
4324 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit a call to the managed MethodAccessException thrower, passing the
 * offending caller and callee as method constants.
 */
4331 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4332 MonoBasicBlock *bblock, unsigned char *ip)
4334 MonoMethod *thrower = method_access_exception ();
4337 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4338 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4339 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), args, NULL);
/*
 * verification_exception:
 * Lazily look up and cache SecurityManager.VerificationException(0 args),
 * the managed thrower used for unverifiable code under CAS.
 */
4343 verification_exception (void)
4345 static MonoMethod *method = NULL;
4348 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4349 method = mono_class_get_method_from_name (secman->securitymanager,
4350 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 * Emit a call to the managed VerificationException thrower (no args).
 */
4357 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4359 MonoMethod *thrower = verification_exception ();
4361 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: a call is allowed when the caller's security level
 * is >= the callee's, or either side is SafeCritical.  Otherwise emit
 * code throwing MethodAccessException at the call site.
 */
4365 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4366 MonoBasicBlock *bblock, unsigned char *ip)
4368 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4369 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4370 gboolean is_safe = TRUE;
4372 if (!(caller_level >= callee_level ||
4373 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4374 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
/* Disallowed: the throw replaces the call (guard elided in extract). */
4379 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 * Test hook: methods literally named "unsafeMethod" are treated as
 * unsafe (used by the security test suite; return statements elided).
 */
4383 method_is_safe (MonoMethod *method)
4386 if (strcmp (method->name, "unsafeMethod") == 0)
4393 * Check that the IL instructions at ip are the array initialization
4394 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Pattern-match the `dup; ldtoken <field>; call RuntimeHelpers::
 * InitializeArray` idiom following a newarr, and when it matches return
 * a pointer to the static RVA data (or the RVA itself under AOT) so the
 * array can be initialized with a memcpy instead of a runtime call.
 * Returns NULL when the pattern or element type does not qualify.
 */
4397 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4400 * newarr[System.Int32]
4402 * ldtoken field valuetype ...
4403 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Byte-level match: dup, ldtoken (0xd0 + 4-byte token where ip[5]==0x04
 * marks a Field token), call (4-byte method token). */
4405 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4406 guint32 token = read32 (ip + 7);
4407 guint32 field_token = read32 (ip + 2);
4408 guint32 field_index = field_token & 0xffffff;
4410 const char *data_ptr;
4412 MonoMethod *cmethod;
4413 MonoClass *dummy_class;
4414 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4420 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be RuntimeHelpers::InitializeArray from corlib. */
4423 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element-size dispatch (most case labels elided in this extract). */
4425 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4426 case MONO_TYPE_BOOLEAN:
4430 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4431 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4432 case MONO_TYPE_CHAR:
4442 return NULL; /* stupid ARM FP swapped format */
/* The field's declared size must cover the data we are about to copy. */
4452 if (size > mono_type_size (field->type, &dummy_align))
4455 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4456 field_index = read32 (ip + 2) & 0xffffff;
4457 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4458 data_ptr = mono_image_rva_map (method->klass->image, rva);
4459 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4460 /* for aot code we do the lookup on load */
4461 if (aot && data_ptr)
4462 return GUINT_TO_POINTER (rva);
/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on CFG, with a message that names the
 * offending method and either disassembles the IL instruction at IP or
 * notes that the method body is empty.
 * NOTE(review): listing elided — the `else` keyword and braces are not
 * visible in this view.
 */
4469 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4471 char *method_fname = mono_method_full_name (method, TRUE);
/* An empty body cannot be disassembled; use a fixed description instead. */
4474 if (mono_method_get_header (method)->code_size == 0)
4475 method_code = g_strdup ("method body is empty.");
4477 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4478 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4479 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* The printf above copied both strings; free the temporaries. */
4480 g_free (method_fname);
4481 g_free (method_code);
/*
 * set_exception_object:
 *
 * Store a pre-built managed exception object on CFG so compilation aborts
 * with exactly this exception. The pointer is registered as a GC root
 * before being stored, so the collector keeps the object alive.
 */
4485 set_exception_object (MonoCompile *cfg, MonoException *exception)
4487 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
/* Register the slot first so the object is never held unrooted. */
4488 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4489 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 * Return whether KLASS is a reference type, resolving through the
 * generic-sharing context first so that shared type variables are mapped
 * to their basic representative type before the check.
 */
4493 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4497 if (cfg->generic_sharing_context)
/* Under sharing, ask for the basic type the variable is shared over. */
4498 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4500 type = &klass->byval_arg;
4501 return MONO_TYPE_IS_REFERENCE (type);
4505 * mono_decompose_array_access_opts:
4507 * Decompose array access opcodes.
/*
 * Lowers high-level array opcodes (array-length load, bounds check, newarr)
 * in every basic block into concrete loads/arch macros/icalls. New code is
 * emitted into a scratch bblock and then spliced in place of the original
 * instruction via mono_replace_ins(), so the CFG shape is preserved.
 * NOTE(review): listing elided — several case labels, breaks and closing
 * braces are not visible in this view.
 */
4510 mono_decompose_array_access_opts (MonoCompile *cfg)
4512 MonoBasicBlock *bb, *first_bb;
4515 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4516 * can be executed anytime. It should be run before decompose_long
4520 * Create a dummy bblock and emit code into it so we can use the normal
4521 * code generation macros.
4523 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4524 first_bb = cfg->cbb;
4526 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4528 MonoInst *prev = NULL;
4530 MonoInst *iargs [3];
/* Skip blocks that were flagged as containing no array-access opcodes. */
4533 if (!bb->has_array_access)
4536 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock before emitting replacement code for this bb. */
4538 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4544 for (ins = bb->code; ins; ins = ins->next) {
4545 switch (ins->opcode) {
/* Array length becomes a plain i4 load of MonoArray.max_length. */
4547 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4548 G_STRUCT_OFFSET (MonoArray, max_length));
4549 MONO_ADD_INS (cfg->cbb, dest);
4551 case OP_BOUNDS_CHECK:
/* Delegated to the architecture-specific bounds-check emitter. */
4552 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared code passes (domain, class, len) to the generic icall... */
4555 if (cfg->opt & MONO_OPT_SHARED) {
4556 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4557 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4558 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4559 iargs [2]->dreg = ins->sreg1;
4561 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4562 dest->dreg = ins->dreg;
/* ...otherwise bake in the rank-1 array vtable and use the fast icall. */
4564 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4567 NEW_VTABLECONST (cfg, iargs [0], vtable);
4568 MONO_ADD_INS (cfg->cbb, iargs [0]);
4569 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4570 iargs [1]->dreg = ins->sreg1;
4572 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4573 dest->dreg = ins->dreg;
/* Emission must not have switched away from the scratch bblock. */
4580 g_assert (cfg->cbb == first_bb);
4582 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4583 /* Replace the original instruction with the new code sequence */
4585 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4586 first_bb->code = first_bb->last_ins = NULL;
4587 first_bb->in_count = first_bb->out_count = 0;
4588 cfg->cbb = first_bb;
4595 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4605 #ifdef MONO_ARCH_SOFT_FLOAT
4608 * mono_handle_soft_float:
4610 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4611 * similar to long support on 32 bit platforms. 32 bit float values require special
4612 * handling when used as locals, arguments, and in calls.
4613 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Rewrites every fp opcode into integer/long equivalents or icalls:
 * r8 constants become i8 constants, fp moves/loads/stores become long ops,
 * r4 accesses go through mono_fstore_r4/mono_fload_r4 icalls, fp calls are
 * converted to long-returning calls, and fp compares/branches become
 * emulation icalls plus integer compares. New code is emitted into a
 * scratch bblock and spliced in with mono_replace_ins(), as in the array
 * decompose pass above.
 * NOTE(review): listing elided — many case labels, breaks and braces are
 * not visible in this view; do not infer control flow beyond what is shown.
 */
4616 mono_handle_soft_float (MonoCompile *cfg)
4618 MonoBasicBlock *bb, *first_bb;
4621 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4625 * Create a dummy bblock and emit code into it so we can use the normal
4626 * code generation macros.
4628 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4629 first_bb = cfg->cbb;
4631 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4633 MonoInst *prev = NULL;
4636 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4638 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4644 for (ins = bb->code; ins; ins = ins->next) {
4645 const char *spec = INS_INFO (ins->opcode);
4647 /* Most fp operations are handled automatically by opcode emulation */
4649 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as a 64-bit int constant. */
4652 d.vald = *(double*)ins->inst_p0;
4653 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4658 /* We load the r8 value */
4659 d.vald = *(float*)ins->inst_p0;
4660 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long/int moves on the paired integer vregs. */
4664 ins->opcode = OP_LMOVE;
4667 ins->opcode = OP_MOVE;
4668 ins->sreg1 = ins->sreg1 + 1;
4671 ins->opcode = OP_MOVE;
4672 ins->sreg1 = ins->sreg1 + 2;
/* fp return: route the two halves of the vreg pair through SETLRET. */
4675 int reg = ins->sreg1;
4677 ins->opcode = OP_SETLRET;
4679 ins->sreg1 = reg + 1;
4680 ins->sreg2 = reg + 2;
4683 case OP_LOADR8_MEMBASE:
4684 ins->opcode = OP_LOADI8_MEMBASE;
4686 case OP_STORER8_MEMBASE_REG:
4687 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: no integer equivalent, so call the mono_fstore_r4 icall. */
4689 case OP_STORER4_MEMBASE_REG: {
4690 MonoInst *iargs [2];
4693 /* Arg 1 is the double value */
4694 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4695 iargs [0]->dreg = ins->sreg1;
4697 /* Arg 2 is the address to store to */
4698 addr_reg = mono_alloc_preg (cfg);
4699 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4700 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: mono_fload_r4 widens the stored float to the r8 vreg pair. */
4704 case OP_LOADR4_MEMBASE: {
4705 MonoInst *iargs [1];
4709 addr_reg = mono_alloc_preg (cfg);
4710 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4711 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4712 conv->dreg = ins->dreg;
4717 case OP_FCALL_MEMBASE: {
4718 MonoCallInst *call = (MonoCallInst*)ins;
/* r4-returning call: replace with an int call plus an r4->r8 widening. */
4719 if (call->signature->ret->type == MONO_TYPE_R4) {
4720 MonoCallInst *call2;
4721 MonoInst *iargs [1];
4724 /* Convert the call into a call returning an int */
4725 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4726 memcpy (call2, call, sizeof (MonoCallInst));
4727 switch (ins->opcode) {
4729 call2->inst.opcode = OP_CALL;
4732 call2->inst.opcode = OP_CALL_REG;
4734 case OP_FCALL_MEMBASE:
4735 call2->inst.opcode = OP_CALL_MEMBASE;
4738 g_assert_not_reached ();
4740 call2->inst.dreg = mono_alloc_ireg (cfg);
4741 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4743 /* FIXME: Optimize this */
4745 /* Emit an r4->r8 conversion */
4746 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4747 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4748 conv->dreg = ins->dreg;
/* r8-returning call: simply retype the call as a long call. */
4750 switch (ins->opcode) {
4752 ins->opcode = OP_LCALL;
4755 ins->opcode = OP_LCALL_REG;
4757 case OP_FCALL_MEMBASE:
4758 ins->opcode = OP_LCALL_MEMBASE;
4761 g_assert_not_reached ();
4767 MonoJitICallInfo *info;
4768 MonoInst *iargs [2];
4769 MonoInst *call, *cmp, *br;
4771 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is keyed off the *branch* opcode following the compare. */
4773 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4776 /* Create dummy MonoInst's for the arguments */
4777 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4778 iargs [0]->dreg = ins->sreg1;
4779 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4780 iargs [1]->dreg = ins->sreg2;
4782 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4784 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4785 cmp->sreg1 = call->dreg;
4787 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on the icall's integer result, reusing the fbcc's targets. */
4789 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4790 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4791 br->inst_true_bb = ins->next->inst_true_bb;
4792 br->inst_false_bb = ins->next->inst_false_bb;
4793 MONO_ADD_INS (cfg->cbb, br);
4795 /* The call sequence might include fp ins */
4798 /* Skip fbcc or fccc */
4799 NULLIFY_INS (ins->next);
4807 MonoJitICallInfo *info;
4808 MonoInst *iargs [2];
4811 /* Convert fccc to icall+icompare+iceq */
4813 info = mono_find_jit_opcode_emulation (ins->opcode);
4816 /* Create dummy MonoInst's for the arguments */
4817 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4818 iargs [0]->dreg = ins->sreg1;
4819 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4820 iargs [1]->dreg = ins->sreg2;
4822 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Materialize the boolean: compare the icall result against 1, then iceq. */
4824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4825 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4827 /* The call sequence might include fp ins */
/* Safety net: no fp vregs may survive this pass. */
4832 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4833 mono_print_ins (ins);
4834 g_assert_not_reached ();
4839 g_assert (cfg->cbb == first_bb);
4841 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4842 /* Replace the original instruction with the new code sequence */
4844 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4845 first_bb->code = first_bb->last_ins = NULL;
4846 first_bb->in_count = first_bb->out_count = 0;
4847 cfg->cbb = first_bb;
4854 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass created long opcodes, so lower them now. */
4857 mono_decompose_long_opts (cfg);
4863 * mono_method_to_ir: translates IL into basic blocks containing trees
4866 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4867 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4868 guint inline_offset, gboolean is_virtual_call)
4870 MonoInst *ins, **sp, **stack_start;
4871 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4872 MonoMethod *cmethod, *method_definition;
4873 MonoInst **arg_array;
4874 MonoMethodHeader *header;
4876 guint32 token, ins_flag;
4878 MonoClass *constrained_call = NULL;
4879 unsigned char *ip, *end, *target, *err_pos;
4880 static double r8_0 = 0.0;
4881 MonoMethodSignature *sig;
4882 MonoGenericContext *generic_context = NULL;
4883 MonoGenericContainer *generic_container = NULL;
4884 MonoType **param_types;
4885 GList *bb_recheck = NULL, *tmp;
4886 int i, n, start_new_bblock, dreg;
4887 int num_calls = 0, inline_costs = 0;
4888 int breakpoint_id = 0;
4890 MonoBoolean security, pinvoke;
4891 MonoSecurityManager* secman = NULL;
4892 MonoDeclSecurityActions actions;
4893 GSList *class_inits = NULL;
4894 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4897 /* serialization and xdomain stuff may need access to private fields and methods */
4898 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4899 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4900 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4901 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4902 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4903 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4905 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4907 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4908 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4909 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4910 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4912 image = method->klass->image;
4913 header = mono_method_get_header (method);
4914 generic_container = mono_method_get_generic_container (method);
4915 sig = mono_method_signature (method);
4916 num_args = sig->hasthis + sig->param_count;
4917 ip = (unsigned char*)header->code;
4918 cfg->cil_start = ip;
4919 end = ip + header->code_size;
4920 mono_jit_stats.cil_code_size += header->code_size;
4922 method_definition = method;
4923 while (method_definition->is_inflated) {
4924 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4925 method_definition = imethod->declaring;
4928 /* SkipVerification is not allowed if core-clr is enabled */
4929 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4931 dont_verify_stloc = TRUE;
4934 if (!dont_verify && mini_method_verify (cfg, method_definition))
4935 goto exception_exit;
4937 if (sig->is_inflated)
4938 generic_context = mono_method_get_context (method);
4939 else if (generic_container)
4940 generic_context = &generic_container->context;
4941 cfg->generic_context = generic_context;
4943 if (!cfg->generic_sharing_context)
4944 g_assert (!sig->has_type_parameters);
4946 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4947 g_assert (method->is_inflated);
4948 g_assert (mono_method_get_context (method)->method_inst);
4950 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4951 g_assert (sig->generic_param_count);
4953 if (cfg->method == method) {
4954 cfg->real_offset = 0;
4956 cfg->real_offset = inline_offset;
4959 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4960 cfg->cil_offset_to_bb_len = header->code_size;
4962 cfg->current_method = method;
4964 if (cfg->verbose_level > 2)
4965 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4967 dont_inline = g_list_prepend (dont_inline, method);
4968 if (cfg->method == method) {
4970 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4971 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4974 NEW_BBLOCK (cfg, start_bblock);
4975 cfg->bb_entry = start_bblock;
4976 start_bblock->cil_code = NULL;
4977 start_bblock->cil_length = 0;
4980 NEW_BBLOCK (cfg, end_bblock);
4981 cfg->bb_exit = end_bblock;
4982 end_bblock->cil_code = NULL;
4983 end_bblock->cil_length = 0;
4984 g_assert (cfg->num_bblocks == 2);
4986 arg_array = cfg->args;
4988 if (header->num_clauses) {
4989 cfg->spvars = g_hash_table_new (NULL, NULL);
4990 cfg->exvars = g_hash_table_new (NULL, NULL);
4992 /* handle exception clauses */
4993 for (i = 0; i < header->num_clauses; ++i) {
4994 MonoBasicBlock *try_bb;
4995 MonoExceptionClause *clause = &header->clauses [i];
4996 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4997 try_bb->real_offset = clause->try_offset;
4998 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4999 tblock->real_offset = clause->handler_offset;
5000 tblock->flags |= BB_EXCEPTION_HANDLER;
5002 link_bblock (cfg, try_bb, tblock);
5004 if (*(ip + clause->handler_offset) == CEE_POP)
5005 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5007 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5008 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5009 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5010 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5011 MONO_ADD_INS (tblock, ins);
5013 /* todo: is a fault block unsafe to optimize? */
5014 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5015 tblock->flags |= BB_EXCEPTION_UNSAFE;
5019 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5021 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5023 /* catch and filter blocks get the exception object on the stack */
5024 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5025 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5026 MonoInst *dummy_use;
5028 /* mostly like handle_stack_args (), but just sets the input args */
5029 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5030 tblock->in_scount = 1;
5031 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5032 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5035 * Add a dummy use for the exvar so its liveness info will be
5039 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5041 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5042 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5043 tblock->real_offset = clause->data.filter_offset;
5044 tblock->in_scount = 1;
5045 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5046 /* The filter block shares the exvar with the handler block */
5047 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5048 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5049 MONO_ADD_INS (tblock, ins);
5053 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5054 clause->data.catch_class &&
5055 cfg->generic_sharing_context &&
5056 mono_class_check_context_used (clause->data.catch_class)) {
5057 if (mono_method_get_context (method)->method_inst)
5058 GENERIC_SHARING_FAILURE (CEE_NOP);
5061 * In shared generic code with catch
5062 * clauses containing type variables
5063 * the exception handling code has to
5064 * be able to get to the rgctx.
5065 * Therefore we have to make sure that
5066 * the vtable/mrgctx argument (for
5067 * static or generic methods) or the
5068 * "this" argument (for non-static
5069 * methods) are live.
5071 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5072 mini_method_get_context (method)->method_inst) {
5073 mono_get_vtable_var (cfg);
5075 MonoInst *dummy_use;
5077 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5082 arg_array = alloca (sizeof (MonoInst *) * num_args);
5083 cfg->cbb = start_bblock;
5084 mono_save_args (cfg, sig, inline_args, arg_array);
5085 cfg->args = arg_array;
5088 /* FIRST CODE BLOCK */
5089 NEW_BBLOCK (cfg, bblock);
5090 bblock->cil_code = ip;
5094 ADD_BBLOCK (cfg, bblock);
5096 if (cfg->method == method) {
5097 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5098 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5099 MONO_INST_NEW (cfg, ins, OP_BREAK);
5100 MONO_ADD_INS (bblock, ins);
5104 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5105 secman = mono_security_manager_get_methods ();
5107 security = (secman && mono_method_has_declsec (method));
5108 /* at this point having security doesn't mean we have any code to generate */
5109 if (security && (cfg->method == method)) {
5110 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5111 * And we do not want to enter the next section (with allocation) if we
5112 * have nothing to generate */
5113 security = mono_declsec_get_demands (method, &actions);
5116 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5117 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5119 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5120 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5121 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5123 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5124 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5128 mono_custom_attrs_free (custom);
5131 custom = mono_custom_attrs_from_class (wrapped->klass);
5132 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5136 mono_custom_attrs_free (custom);
5139 /* not a P/Invoke after all */
5144 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5145 /* we use a separate basic block for the initialization code */
5146 NEW_BBLOCK (cfg, init_localsbb);
5147 cfg->bb_init = init_localsbb;
5148 init_localsbb->real_offset = cfg->real_offset;
5149 start_bblock->next_bb = init_localsbb;
5150 init_localsbb->next_bb = bblock;
5151 link_bblock (cfg, start_bblock, init_localsbb);
5152 link_bblock (cfg, init_localsbb, bblock);
5154 cfg->cbb = init_localsbb;
5156 start_bblock->next_bb = bblock;
5157 link_bblock (cfg, start_bblock, bblock);
5160 /* at this point we know, if security is TRUE, that some code needs to be generated */
5161 if (security && (cfg->method == method)) {
5164 mono_jit_stats.cas_demand_generation++;
5166 if (actions.demand.blob) {
5167 /* Add code for SecurityAction.Demand */
5168 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5169 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5170 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5171 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5173 if (actions.noncasdemand.blob) {
5174 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5175 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5176 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5177 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5178 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5179 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5181 if (actions.demandchoice.blob) {
5182 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5183 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5184 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5185 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5186 mono_emit_method_call (cfg, secman->demandchoice, mono_method_signature (secman->demandchoice), args, NULL);
5190 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5192 mono_emit_method_call (cfg, secman->demandunmanaged, mono_method_signature (secman->demandunmanaged), NULL, NULL);
5195 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5196 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5197 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5198 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5199 if (!(method->klass && method->klass->image &&
5200 mono_security_core_clr_is_platform_image (method->klass->image))) {
5201 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5205 if (!method_is_safe (method))
5206 emit_throw_verification_exception (cfg, bblock, ip);
5209 if (header->code_size == 0)
5212 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5217 if (cfg->method == method)
5218 mono_debug_init_method (cfg, bblock, breakpoint_id);
5220 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5222 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5223 for (n = 0; n < sig->param_count; ++n)
5224 param_types [n + sig->hasthis] = sig->params [n];
5225 cfg->arg_types = param_types;
5226 for (n = 0; n < header->num_locals; ++n) {
5227 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5232 /* add a check for this != NULL to inlined methods */
5233 if (is_virtual_call) {
5236 NEW_ARGLOAD (cfg, arg_ins, 0);
5237 MONO_ADD_INS (cfg->cbb, arg_ins);
5238 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5239 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5240 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5243 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5244 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5247 start_new_bblock = 0;
5251 if (cfg->method == method)
5252 cfg->real_offset = ip - header->code;
5254 cfg->real_offset = inline_offset;
5259 if (start_new_bblock) {
5260 bblock->cil_length = ip - bblock->cil_code;
5261 if (start_new_bblock == 2) {
5262 g_assert (ip == tblock->cil_code);
5264 GET_BBLOCK (cfg, tblock, ip);
5266 bblock->next_bb = tblock;
5269 start_new_bblock = 0;
5270 for (i = 0; i < bblock->in_scount; ++i) {
5271 if (cfg->verbose_level > 3)
5272 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5273 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5277 g_slist_free (class_inits);
5280 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5281 link_bblock (cfg, bblock, tblock);
5282 if (sp != stack_start) {
5283 handle_stack_args (cfg, stack_start, sp - stack_start);
5285 CHECK_UNVERIFIABLE (cfg);
5287 bblock->next_bb = tblock;
5290 for (i = 0; i < bblock->in_scount; ++i) {
5291 if (cfg->verbose_level > 3)
5292 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5293 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5296 g_slist_free (class_inits);
5301 bblock->real_offset = cfg->real_offset;
5303 if ((cfg->method == method) && cfg->coverage_info) {
5304 guint32 cil_offset = ip - header->code;
5305 cfg->coverage_info->data [cil_offset].cil_code = ip;
5307 /* TODO: Use an increment here */
5308 #if defined(__i386__)
5309 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5310 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5312 MONO_ADD_INS (cfg->cbb, ins);
5314 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5315 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5319 if (cfg->verbose_level > 3)
5320 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5325 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5327 MONO_ADD_INS (bblock, ins);
5333 CHECK_STACK_OVF (1);
5334 n = (*ip)-CEE_LDARG_0;
5336 EMIT_NEW_ARGLOAD (cfg, ins, n);
5344 CHECK_STACK_OVF (1);
5345 n = (*ip)-CEE_LDLOC_0;
5347 EMIT_NEW_LOCLOAD (cfg, ins, n);
5358 n = (*ip)-CEE_STLOC_0;
5361 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5364 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5365 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5366 /* Optimize reg-reg moves away */
5368 * Can't optimize other opcodes, since sp[0] might point to
5369 * the last ins of a decomposed opcode.
5371 sp [0]->dreg = (cfg)->locals [n]->dreg;
5373 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5381 CHECK_STACK_OVF (1);
5384 EMIT_NEW_ARGLOAD (cfg, ins, n);
5390 CHECK_STACK_OVF (1);
5393 NEW_ARGLOADA (cfg, ins, n);
5394 MONO_ADD_INS (cfg->cbb, ins);
5404 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5406 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5411 CHECK_STACK_OVF (1);
5414 EMIT_NEW_LOCLOAD (cfg, ins, n);
5420 CHECK_STACK_OVF (1);
5421 CHECK_LOCAL (ip [1]);
5424 * ldloca inhibits many optimizations so try to get rid of it in common
5427 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5428 gboolean skip = FALSE;
5430 /* From the INITOBJ case */
5431 token = read32 (ip + 4);
5432 klass = mini_get_class (method, token, generic_context);
5433 CHECK_TYPELOAD (klass);
5434 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
5435 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
5437 if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5438 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5439 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5440 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5452 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5460 CHECK_LOCAL (ip [1]);
5461 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5463 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5468 CHECK_STACK_OVF (1);
5469 EMIT_NEW_PCONST (cfg, ins, NULL);
5470 ins->type = STACK_OBJ;
5475 CHECK_STACK_OVF (1);
5476 EMIT_NEW_ICONST (cfg, ins, -1);
5489 CHECK_STACK_OVF (1);
5490 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5496 CHECK_STACK_OVF (1);
5498 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5504 CHECK_STACK_OVF (1);
5505 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5511 CHECK_STACK_OVF (1);
5512 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5513 ins->type = STACK_I8;
5514 ins->dreg = alloc_dreg (cfg, STACK_I8);
5516 ins->inst_l = (gint64)read64 (ip);
5517 MONO_ADD_INS (bblock, ins);
5523 /* FIXME: we should really allocate this only late in the compilation process */
5524 mono_domain_lock (cfg->domain);
5525 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5526 mono_domain_unlock (cfg->domain);
5528 CHECK_STACK_OVF (1);
5529 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5530 ins->type = STACK_R8;
5531 ins->dreg = alloc_dreg (cfg, STACK_R8);
5535 MONO_ADD_INS (bblock, ins);
5543 /* FIXME: we should really allocate this only late in the compilation process */
5544 mono_domain_lock (cfg->domain);
5545 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5546 mono_domain_unlock (cfg->domain);
5548 CHECK_STACK_OVF (1);
5549 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5550 ins->type = STACK_R8;
5551 ins->dreg = alloc_dreg (cfg, STACK_R8);
5555 MONO_ADD_INS (bblock, ins);
5562 MonoInst *temp, *store;
5564 CHECK_STACK_OVF (1);
5568 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5569 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5571 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5574 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5587 if (sp [0]->type == STACK_R8)
5588 /* we need to pop the value from the x86 FP stack */
5589 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5596 if (stack_start != sp)
5598 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5599 ins = (MonoInst*)call;
5600 token = read32 (ip + 1);
5601 /* FIXME: check the signature matches */
5602 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5607 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5608 GENERIC_SHARING_FAILURE (CEE_JMP);
5610 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5611 if (check_linkdemand (cfg, method, cmethod))
5613 CHECK_CFG_EXCEPTION;
5616 ins->inst_p0 = cmethod;
5617 MONO_ADD_INS (bblock, ins);
5619 start_new_bblock = 1;
5622 cfg->disable_aot = 1;
5627 case CEE_CALLVIRT: {
5628 MonoInst *addr = NULL;
5629 MonoMethodSignature *fsig = NULL;
5631 int virtual = *ip == CEE_CALLVIRT;
5632 int calli = *ip == CEE_CALLI;
5633 gboolean pass_imt_from_rgctx = FALSE;
5634 MonoInst *imt_arg = NULL;
5635 gboolean pass_vtable = FALSE;
5636 gboolean pass_mrgctx = FALSE;
5637 MonoInst *vtable_arg = NULL;
5638 gboolean check_this = FALSE;
5641 token = read32 (ip + 1);
5648 if (method->wrapper_type != MONO_WRAPPER_NONE)
5649 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5651 fsig = mono_metadata_parse_signature (image, token);
5653 n = fsig->param_count + fsig->hasthis;
5655 MonoMethod *cil_method;
5657 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5658 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5659 cil_method = cmethod;
5660 } else if (constrained_call) {
5661 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5663 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5664 cil_method = cmethod;
5669 if (!dont_verify && !cfg->skip_visibility) {
5670 MonoMethod *target_method = cil_method;
5671 if (method->is_inflated) {
5672 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5674 if (!mono_method_can_access_method (method_definition, target_method) &&
5675 !mono_method_can_access_method (method, cil_method))
5676 METHOD_ACCESS_FAILURE;
5679 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5680 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5682 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5683 /* MS.NET seems to silently convert this to a callvirt */
5686 if (!cmethod->klass->inited)
5687 if (!mono_class_init (cmethod->klass))
5690 if (mono_method_signature (cmethod)->pinvoke) {
5691 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
5692 fsig = mono_method_signature (wrapper);
5693 } else if (constrained_call) {
5694 fsig = mono_method_signature (cmethod);
5696 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5699 mono_save_token_info (cfg, image, token, cmethod);
5701 n = fsig->param_count + fsig->hasthis;
5703 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5704 if (check_linkdemand (cfg, method, cmethod))
5706 CHECK_CFG_EXCEPTION;
5709 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5710 mini_class_is_system_array (cmethod->klass)) {
5711 array_rank = cmethod->klass->rank;
5714 if (cmethod->string_ctor)
5715 g_assert_not_reached ();
5718 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5721 if (!cfg->generic_sharing_context && cmethod)
5722 g_assert (!mono_method_check_context_used (cmethod));
5726 //g_assert (!virtual || fsig->hasthis);
5730 if (constrained_call) {
5732 * We have the `constrained.' prefix opcode.
5734 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5738 * The type parameter is instantiated as a valuetype,
5739 * but that type doesn't override the method we're
5740 * calling, so we need to box `this'.
5742 dreg = alloc_dreg (cfg, STACK_VTYPE);
5743 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5744 ins->klass = constrained_call;
5745 sp [0] = handle_box (cfg, ins, constrained_call);
5746 } else if (!constrained_call->valuetype) {
5747 int dreg = alloc_preg (cfg);
5750 * The type parameter is instantiated as a reference
5751 * type. We have a managed pointer on the stack, so
5752 * we need to dereference it here.
5754 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5755 ins->type = STACK_OBJ;
5757 } else if (cmethod->klass->valuetype)
5759 constrained_call = NULL;
5762 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5766 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5767 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5768 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5769 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5770 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5773 * Pass vtable iff target method might
5774 * be shared, which means that sharing
5775 * is enabled for its class and its
5776 * context is sharable (and it's not a
5779 if (sharing_enabled && context_sharable &&
5780 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5784 if (cmethod && mini_method_get_context (cmethod) &&
5785 mini_method_get_context (cmethod)->method_inst) {
5786 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5787 MonoGenericContext *context = mini_method_get_context (cmethod);
5788 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5790 g_assert (!pass_vtable);
5792 if (sharing_enabled && context_sharable)
5796 if (cfg->generic_sharing_context && cmethod) {
5797 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5799 context_used = mono_method_check_context_used (cmethod);
5801 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5802 /* Generic method interface
5803 calls are resolved via a
5804 helper function and don't
5806 if (!cmethod_context || !cmethod_context->method_inst)
5807 pass_imt_from_rgctx = TRUE;
5811 * If a shared method calls another
5812 * shared method then the caller must
5813 * have a generic sharing context
5814 * because the magic trampoline
5815 * requires it. FIXME: We shouldn't
5816 * have to force the vtable/mrgctx
5817 * variable here. Instead there
5818 * should be a flag in the cfg to
5819 * request a generic sharing context.
5821 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5822 mono_get_vtable_var (cfg);
5827 GENERIC_SHARING_FAILURE (*ip);
5833 EMIT_GET_RGCTX (rgctx, context_used);
5834 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5836 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5838 CHECK_TYPELOAD (cmethod->klass);
5839 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5844 g_assert (!vtable_arg);
5849 EMIT_GET_RGCTX (rgctx, context_used);
5850 vtable_arg = emit_get_rgctx_method_rgctx (cfg, context_used, rgctx, cmethod);
5852 MonoMethodRuntimeGenericContext *mrgctx;
5854 mrgctx = mono_method_lookup_rgctx (mono_class_vtable (cfg->domain, cmethod->klass),
5855 mini_method_get_context (cmethod)->method_inst);
5857 EMIT_NEW_PCONST (cfg, vtable_arg, mrgctx);
5860 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5861 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5868 if (pass_imt_from_rgctx) {
5871 g_assert (!pass_vtable);
5874 EMIT_GET_RGCTX (rgctx, context_used);
5875 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5876 MONO_RGCTX_INFO_METHOD);
5882 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5883 check->sreg1 = sp [0]->dreg;
5884 MONO_ADD_INS (cfg->cbb, check);
5887 /* Calling virtual generic methods */
5888 if (cmethod && virtual &&
5889 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5890 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5891 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5892 mono_method_signature (cmethod)->generic_param_count) {
5893 MonoInst *this_temp, *this_arg_temp, *store;
5894 MonoInst *iargs [4];
5896 g_assert (mono_method_signature (cmethod)->is_inflated);
5898 /* Prevent inlining of methods that contain indirect calls */
5901 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5902 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5903 MONO_ADD_INS (bblock, store);
5905 /* FIXME: This should be a managed pointer */
5906 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5908 /* Because of the PCONST below */
5909 cfg->disable_aot = TRUE;
5910 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5914 EMIT_GET_RGCTX (rgctx, context_used);
5915 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5916 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5917 addr = mono_emit_jit_icall (cfg,
5918 mono_helper_compile_generic_method_wo_context, iargs);
5920 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5921 EMIT_NEW_PCONST (cfg, iargs [2], mono_method_get_context (cmethod));
5922 EMIT_NEW_TEMPLOADA (cfg, iargs [3], this_arg_temp->inst_c0);
5923 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5926 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5928 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5929 if (!MONO_TYPE_IS_VOID (fsig->ret))
5938 /* FIXME: runtime generic context pointer for jumps? */
5939 /* FIXME: handle this for generic sharing eventually */
5940 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5941 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5944 /* FIXME: runtime generic context pointer for jumps? */
5945 GENERIC_SHARING_FAILURE (*ip);
5947 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5950 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5951 call->tail_call = TRUE;
5952 call->method = cmethod;
5953 call->signature = mono_method_signature (cmethod);
5956 /* Handle tail calls similarly to calls */
5957 call->inst.opcode = OP_TAILCALL;
5959 mono_arch_emit_call (cfg, call);
5962 * We implement tail calls by storing the actual arguments into the
5963 * argument variables, then emitting a CEE_JMP.
5965 for (i = 0; i < n; ++i) {
5966 /* Prevent argument from being register allocated */
5967 arg_array [i]->flags |= MONO_INST_VOLATILE;
5968 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5973 cfg->disable_aot = 1;
5975 ins = (MonoInst*)call;
5976 ins->inst_p0 = cmethod;
5977 ins->inst_p1 = arg_array [0];
5978 MONO_ADD_INS (bblock, ins);
5979 link_bblock (cfg, bblock, end_bblock);
5980 start_new_bblock = 1;
5981 /* skip CEE_RET as well */
5987 /* Conversion to a JIT intrinsic */
5988 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5989 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5990 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6001 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6002 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6003 mono_method_check_inlining (cfg, cmethod) &&
6004 !g_list_find (dont_inline, cmethod)) {
6006 gboolean allways = FALSE;
6008 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6009 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6010 /* Prevent inlining of methods that call wrappers */
6012 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
6016 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6018 cfg->real_offset += 5;
6021 if (!MONO_TYPE_IS_VOID (fsig->ret))
6022 /* *sp is already set by inline_method */
6025 inline_costs += costs;
6031 inline_costs += 10 * num_calls++;
6033 /* Tail recursion elimination */
6034 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6035 gboolean has_vtargs = FALSE;
6038 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6041 /* keep it simple */
6042 for (i = fsig->param_count - 1; i >= 0; i--) {
6043 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6048 for (i = 0; i < n; ++i)
6049 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6050 MONO_INST_NEW (cfg, ins, OP_BR);
6051 MONO_ADD_INS (bblock, ins);
6052 tblock = start_bblock->out_bb [0];
6053 link_bblock (cfg, bblock, tblock);
6054 ins->inst_target_bb = tblock;
6055 start_new_bblock = 1;
6057 /* skip the CEE_RET, too */
6058 if (ip_in_bb (cfg, bblock, ip + 5))
6068 /* Generic sharing */
6069 /* FIXME: only do this for generic methods if
6070 they are not shared! */
6072 (cmethod->klass->valuetype ||
6073 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6074 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6075 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6076 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6077 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6078 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6083 g_assert (cfg->generic_sharing_context && cmethod);
6087 * We are compiling a call to a
6088 * generic method from shared code,
6089 * which means that we have to look up
6090 * the method in the rgctx and do an
6094 EMIT_GET_RGCTX (rgctx, context_used);
6095 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6098 /* Indirect calls */
6100 g_assert (!imt_arg);
6102 if (*ip == CEE_CALL)
6103 g_assert (context_used);
6104 else if (*ip == CEE_CALLI)
6105 g_assert (!vtable_arg);
6107 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6108 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6110 /* Prevent inlining of methods with indirect calls */
6114 #ifdef MONO_ARCH_RGCTX_REG
6116 int rgctx_reg = mono_alloc_preg (cfg);
6118 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6119 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6120 call = (MonoCallInst*)ins;
6121 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6123 GENERIC_SHARING_FAILURE (*ip);
6126 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6128 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6129 if (fsig->pinvoke && !fsig->ret->byref) {
6133 * Native code might return non register sized integers
6134 * without initializing the upper bits.
6136 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6137 case OP_LOADI1_MEMBASE:
6138 widen_op = OP_ICONV_TO_I1;
6140 case OP_LOADU1_MEMBASE:
6141 widen_op = OP_ICONV_TO_U1;
6143 case OP_LOADI2_MEMBASE:
6144 widen_op = OP_ICONV_TO_I2;
6146 case OP_LOADU2_MEMBASE:
6147 widen_op = OP_ICONV_TO_U2;
6153 if (widen_op != -1) {
6154 int dreg = alloc_preg (cfg);
6157 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6158 widen->type = ins->type;
6175 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6176 if (sp [fsig->param_count]->type == STACK_OBJ) {
6177 MonoInst *iargs [2];
6180 iargs [1] = sp [fsig->param_count];
6182 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6185 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6186 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6187 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6188 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6190 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6193 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6194 if (!cmethod->klass->element_class->valuetype && !readonly)
6195 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6198 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6201 g_assert_not_reached ();
6209 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6211 if (!MONO_TYPE_IS_VOID (fsig->ret))
6222 #ifdef MONO_ARCH_RGCTX_REG
6224 int rgctx_reg = mono_alloc_preg (cfg);
6226 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6227 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6228 call = (MonoCallInst*)ins;
6229 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6231 GENERIC_SHARING_FAILURE (*ip);
6233 } else if (imt_arg) {
6234 ins = (MonoInst*)mono_emit_imt_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6236 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6239 if (!MONO_TYPE_IS_VOID (fsig->ret))
6247 if (cfg->method != method) {
6248 /* return from inlined method */
6253 //g_assert (returnvar != -1);
6254 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6255 cfg->ret_var_set = TRUE;
6259 MonoType *ret_type = mono_method_signature (method)->ret;
6261 g_assert (!return_var);
6264 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6267 if (!cfg->vret_addr) {
6270 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6272 EMIT_NEW_RETLOADA (cfg, ret_addr);
6274 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6275 ins->klass = mono_class_from_mono_type (ret_type);
6278 #ifdef MONO_ARCH_SOFT_FLOAT
6279 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6280 MonoInst *iargs [1];
6284 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6285 mono_arch_emit_setret (cfg, method, conv);
6287 mono_arch_emit_setret (cfg, method, *sp);
6290 mono_arch_emit_setret (cfg, method, *sp);
6295 if (sp != stack_start)
6297 MONO_INST_NEW (cfg, ins, OP_BR);
6299 ins->inst_target_bb = end_bblock;
6300 MONO_ADD_INS (bblock, ins);
6301 link_bblock (cfg, bblock, end_bblock);
6302 start_new_bblock = 1;
6306 MONO_INST_NEW (cfg, ins, OP_BR);
6308 target = ip + 1 + (signed char)(*ip);
6310 GET_BBLOCK (cfg, tblock, target);
6311 link_bblock (cfg, bblock, tblock);
6312 CHECK_BBLOCK (target, ip, tblock);
6313 ins->inst_target_bb = tblock;
6314 if (sp != stack_start) {
6315 handle_stack_args (cfg, stack_start, sp - stack_start);
6317 CHECK_UNVERIFIABLE (cfg);
6319 MONO_ADD_INS (bblock, ins);
6320 start_new_bblock = 1;
6321 inline_costs += BRANCH_COST;
6335 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6337 target = ip + 1 + *(signed char*)ip;
6343 inline_costs += BRANCH_COST;
6347 MONO_INST_NEW (cfg, ins, OP_BR);
6350 target = ip + 4 + (gint32)read32(ip);
6352 GET_BBLOCK (cfg, tblock, target);
6353 link_bblock (cfg, bblock, tblock);
6354 CHECK_BBLOCK (target, ip, tblock);
6355 ins->inst_target_bb = tblock;
6356 if (sp != stack_start) {
6357 handle_stack_args (cfg, stack_start, sp - stack_start);
6359 CHECK_UNVERIFIABLE (cfg);
6362 MONO_ADD_INS (bblock, ins);
6364 start_new_bblock = 1;
6365 inline_costs += BRANCH_COST;
6372 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6373 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6374 guint32 opsize = is_short ? 1 : 4;
6376 CHECK_OPSIZE (opsize);
6378 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6381 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6386 GET_BBLOCK (cfg, tblock, target);
6387 link_bblock (cfg, bblock, tblock);
6388 CHECK_BBLOCK (target, ip, tblock);
6389 GET_BBLOCK (cfg, tblock, ip);
6390 link_bblock (cfg, bblock, tblock);
6392 if (sp != stack_start) {
6393 handle_stack_args (cfg, stack_start, sp - stack_start);
6394 CHECK_UNVERIFIABLE (cfg);
6397 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6398 cmp->sreg1 = sp [0]->dreg;
6399 type_from_op (cmp, sp [0], NULL);
6402 #if SIZEOF_VOID_P == 4
6403 if (cmp->opcode == OP_LCOMPARE_IMM) {
6404 /* Convert it to OP_LCOMPARE */
6405 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6406 ins->type = STACK_I8;
6407 ins->dreg = alloc_dreg (cfg, STACK_I8);
6409 MONO_ADD_INS (bblock, ins);
6410 cmp->opcode = OP_LCOMPARE;
6411 cmp->sreg2 = ins->dreg;
6414 MONO_ADD_INS (bblock, cmp);
6416 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6417 type_from_op (ins, sp [0], NULL);
6418 MONO_ADD_INS (bblock, ins);
6419 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6420 GET_BBLOCK (cfg, tblock, target);
6421 ins->inst_true_bb = tblock;
6422 GET_BBLOCK (cfg, tblock, ip);
6423 ins->inst_false_bb = tblock;
6424 start_new_bblock = 2;
6427 inline_costs += BRANCH_COST;
6442 MONO_INST_NEW (cfg, ins, *ip);
6444 target = ip + 4 + (gint32)read32(ip);
6450 inline_costs += BRANCH_COST;
6454 MonoBasicBlock **targets;
6455 MonoBasicBlock *default_bblock;
6456 MonoJumpInfoBBTable *table;
6457 int offset_reg = alloc_preg (cfg);
6458 int target_reg = alloc_preg (cfg);
6459 int table_reg = alloc_preg (cfg);
6460 int sum_reg = alloc_preg (cfg);
6464 n = read32 (ip + 1);
6467 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6471 CHECK_OPSIZE (n * sizeof (guint32));
6472 target = ip + n * sizeof (guint32);
6474 GET_BBLOCK (cfg, default_bblock, target);
6476 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6477 for (i = 0; i < n; ++i) {
6478 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6479 targets [i] = tblock;
6483 if (sp != stack_start) {
6485 * Link the current bb with the targets as well, so handle_stack_args
6486 * will set their in_stack correctly.
6488 link_bblock (cfg, bblock, default_bblock);
6489 for (i = 0; i < n; ++i)
6490 link_bblock (cfg, bblock, targets [i]);
6492 handle_stack_args (cfg, stack_start, sp - stack_start);
6494 CHECK_UNVERIFIABLE (cfg);
6497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6501 for (i = 0; i < n; ++i)
6502 link_bblock (cfg, bblock, targets [i]);
6504 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6505 table->table = targets;
6506 table->table_size = n;
6509 /* ARM implements SWITCH statements differently */
6510 /* FIXME: Make it use the generic implementation */
6511 /* the backend code will deal with aot vs normal case */
6512 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6513 ins->sreg1 = src1->dreg;
6514 ins->inst_p0 = table;
6515 ins->inst_many_bb = targets;
6516 ins->klass = GUINT_TO_POINTER (n);
6517 MONO_ADD_INS (cfg->cbb, ins);
6519 if (sizeof (gpointer) == 8)
6520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6524 #if SIZEOF_VOID_P == 8
6525 /* The upper word might not be zero, and we add it to a 64 bit address later */
6526 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6529 if (cfg->compile_aot) {
6530 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6532 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6533 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6534 ins->inst_p0 = table;
6535 ins->dreg = table_reg;
6536 MONO_ADD_INS (cfg->cbb, ins);
6539 /* FIXME: Use load_memindex */
6540 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6541 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6542 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6544 start_new_bblock = 1;
6545 inline_costs += (BRANCH_COST * 2);
6565 dreg = alloc_freg (cfg);
6568 dreg = alloc_lreg (cfg);
6571 dreg = alloc_preg (cfg);
6574 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6575 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6576 ins->flags |= ins_flag;
6578 MONO_ADD_INS (bblock, ins);
6593 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6594 ins->flags |= ins_flag;
6596 MONO_ADD_INS (bblock, ins);
6604 MONO_INST_NEW (cfg, ins, (*ip));
6606 ins->sreg1 = sp [0]->dreg;
6607 ins->sreg2 = sp [1]->dreg;
6608 type_from_op (ins, sp [0], sp [1]);
6610 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6612 /* Use the immediate opcodes if possible */
6613 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6614 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6615 if (imm_opcode != -1) {
6616 ins->opcode = imm_opcode;
6617 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6620 sp [1]->opcode = OP_NOP;
6624 MONO_ADD_INS ((cfg)->cbb, (ins));
6627 mono_decompose_opcode (cfg, ins);
6644 MONO_INST_NEW (cfg, ins, (*ip));
6646 ins->sreg1 = sp [0]->dreg;
6647 ins->sreg2 = sp [1]->dreg;
6648 type_from_op (ins, sp [0], sp [1]);
6650 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6651 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6653 /* FIXME: Pass opcode to is_inst_imm */
6655 /* Use the immediate opcodes if possible */
6656 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6659 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6660 if (imm_opcode != -1) {
6661 ins->opcode = imm_opcode;
6662 if (sp [1]->opcode == OP_I8CONST) {
6663 #if SIZEOF_VOID_P == 8
6664 ins->inst_imm = sp [1]->inst_l;
6666 ins->inst_ls_word = sp [1]->inst_ls_word;
6667 ins->inst_ms_word = sp [1]->inst_ms_word;
6671 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6674 sp [1]->opcode = OP_NOP;
6677 MONO_ADD_INS ((cfg)->cbb, (ins));
6680 mono_decompose_opcode (cfg, ins);
6693 case CEE_CONV_OVF_I8:
6694 case CEE_CONV_OVF_U8:
6698 /* Special case this earlier so we have long constants in the IR */
6699 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6700 int data = sp [-1]->inst_c0;
6701 sp [-1]->opcode = OP_I8CONST;
6702 sp [-1]->type = STACK_I8;
6703 #if SIZEOF_VOID_P == 8
6704 if ((*ip) == CEE_CONV_U8)
6705 sp [-1]->inst_c0 = (guint32)data;
6707 sp [-1]->inst_c0 = data;
6709 sp [-1]->inst_ls_word = data;
6710 if ((*ip) == CEE_CONV_U8)
6711 sp [-1]->inst_ms_word = 0;
6713 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6715 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6722 case CEE_CONV_OVF_I4:
6723 case CEE_CONV_OVF_I1:
6724 case CEE_CONV_OVF_I2:
6725 case CEE_CONV_OVF_I:
6726 case CEE_CONV_OVF_U:
6729 if (sp [-1]->type == STACK_R8) {
6730 ADD_UNOP (CEE_CONV_OVF_I8);
6737 case CEE_CONV_OVF_U1:
6738 case CEE_CONV_OVF_U2:
6739 case CEE_CONV_OVF_U4:
6742 if (sp [-1]->type == STACK_R8) {
6743 ADD_UNOP (CEE_CONV_OVF_U8);
6750 case CEE_CONV_OVF_I1_UN:
6751 case CEE_CONV_OVF_I2_UN:
6752 case CEE_CONV_OVF_I4_UN:
6753 case CEE_CONV_OVF_I8_UN:
6754 case CEE_CONV_OVF_U1_UN:
6755 case CEE_CONV_OVF_U2_UN:
6756 case CEE_CONV_OVF_U4_UN:
6757 case CEE_CONV_OVF_U8_UN:
6758 case CEE_CONV_OVF_I_UN:
6759 case CEE_CONV_OVF_U_UN:
6769 case CEE_ADD_OVF_UN:
6771 case CEE_MUL_OVF_UN:
6773 case CEE_SUB_OVF_UN:
6781 token = read32 (ip + 1);
6782 klass = mini_get_class (method, token, generic_context);
6783 CHECK_TYPELOAD (klass);
6785 if (generic_class_is_reference_type (cfg, klass)) {
6786 MonoInst *store, *load;
6787 int dreg = alloc_preg (cfg);
6789 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6790 load->flags |= ins_flag;
6791 MONO_ADD_INS (cfg->cbb, load);
6793 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6794 store->flags |= ins_flag;
6795 MONO_ADD_INS (cfg->cbb, store);
6797 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6809 token = read32 (ip + 1);
6810 klass = mini_get_class (method, token, generic_context);
6811 CHECK_TYPELOAD (klass);
6813 /* Optimize the common ldobj+stloc combination */
6823 loc_index = ip [5] - CEE_STLOC_0;
6830 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6831 CHECK_LOCAL (loc_index);
6833 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6834 ins->dreg = cfg->locals [loc_index]->dreg;
6840 /* Optimize the ldobj+stobj combination */
6841 /* The reference case ends up being a load+store anyway */
6842 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6847 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6854 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6863 CHECK_STACK_OVF (1);
6865 n = read32 (ip + 1);
6867 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6868 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6869 ins->type = STACK_OBJ;
6872 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6873 MonoInst *iargs [1];
6875 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6876 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6878 if (cfg->opt & MONO_OPT_SHARED) {
6879 MonoInst *iargs [3];
6881 if (cfg->compile_aot) {
6882 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6884 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6885 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6886 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6887 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6888 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6890 if (bblock->out_of_line) {
6891 MonoInst *iargs [2];
6893 if (cfg->method->klass->image == mono_defaults.corlib) {
6895 * Avoid relocations in AOT and save some space by using a
6896 * version of helper_ldstr specialized to mscorlib.
6898 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6899 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6901 /* Avoid creating the string object */
6902 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6903 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6904 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6908 if (cfg->compile_aot) {
6909 NEW_LDSTRCONST (cfg, ins, image, n);
6911 MONO_ADD_INS (bblock, ins);
6914 NEW_PCONST (cfg, ins, NULL);
6915 ins->type = STACK_OBJ;
6916 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6918 MONO_ADD_INS (bblock, ins);
6927 MonoInst *iargs [2];
6928 MonoMethodSignature *fsig;
6933 token = read32 (ip + 1);
6934 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6937 fsig = mono_method_get_signature (cmethod, image, token);
6939 mono_save_token_info (cfg, image, token, cmethod);
6941 if (!mono_class_init (cmethod->klass))
6944 if (cfg->generic_sharing_context)
6945 context_used = mono_method_check_context_used (cmethod);
6947 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6948 if (check_linkdemand (cfg, method, cmethod))
6950 CHECK_CFG_EXCEPTION;
6951 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6952 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6955 n = fsig->param_count;
6959 * Generate smaller code for the common newobj <exception> instruction in
6960 * argument checking code.
6962 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6963 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6964 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6965 MonoInst *iargs [3];
6969 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6972 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6976 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6981 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6984 g_assert_not_reached ();
6992 /* move the args to allow room for 'this' in the first position */
6998 /* check_call_signature () requires sp[0] to be set */
6999 this_ins.type = STACK_OBJ;
7001 if (check_call_signature (cfg, fsig, sp))
7006 if (mini_class_is_system_array (cmethod->klass)) {
7007 g_assert (!context_used);
7008 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7009 if (fsig->param_count == 2)
7010 /* Avoid varargs in the common case */
7011 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7013 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7014 } else if (cmethod->string_ctor) {
7015 g_assert (!context_used);
7016 /* we simply pass a null pointer */
7017 EMIT_NEW_PCONST (cfg, *sp, NULL);
7018 /* now call the string ctor */
7019 alloc = mono_emit_method_call (cfg, cmethod, fsig, sp, NULL);
7021 MonoInst* callvirt_this_arg = NULL;
7023 if (cmethod->klass->valuetype) {
7024 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7025 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7026 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7031 * The code generated by mini_emit_virtual_call () expects
7032 * iargs [0] to be a boxed instance, but luckily the vcall
7033 * will be transformed into a normal call there.
7035 } else if (context_used) {
7036 MonoInst *rgctx, *data;
7039 EMIT_GET_RGCTX (rgctx, context_used);
7040 if (cfg->opt & MONO_OPT_SHARED)
7041 rgctx_info = MONO_RGCTX_INFO_KLASS;
7043 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7044 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7046 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7049 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7051 CHECK_TYPELOAD (cmethod->klass);
7054 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7055 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7056 * As a workaround, we call class cctors before allocating objects.
7058 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7059 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7060 mono_emit_native_call (cfg, tramp,
7061 helper_sig_class_init_trampoline,
7063 if (cfg->verbose_level > 2)
7064 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7065 class_inits = g_slist_prepend (class_inits, vtable);
7068 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7073 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7075 /* Now call the actual ctor */
7076 /* Avoid virtual calls to ctors if possible */
7077 if (cmethod->klass->marshalbyref)
7078 callvirt_this_arg = sp [0];
7080 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7081 mono_method_check_inlining (cfg, cmethod) &&
7082 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7083 !g_list_find (dont_inline, cmethod)) {
7086 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7087 cfg->real_offset += 5;
7090 inline_costs += costs - 5;
7093 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7095 } else if (context_used &&
7096 (cmethod->klass->valuetype ||
7097 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7098 MonoInst *rgctx, *cmethod_addr;
7100 g_assert (!callvirt_this_arg);
7102 EMIT_GET_RGCTX (rgctx, context_used);
7103 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7104 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7106 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7109 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7113 if (alloc == NULL) {
7115 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7116 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7130 token = read32 (ip + 1);
7131 klass = mini_get_class (method, token, generic_context);
7132 CHECK_TYPELOAD (klass);
7133 if (sp [0]->type != STACK_OBJ)
7136 if (cfg->generic_sharing_context)
7137 context_used = mono_class_check_context_used (klass);
7140 MonoInst *rgctx, *args [2];
7142 g_assert (!method->klass->valuetype);
7148 EMIT_GET_RGCTX (rgctx, context_used);
7149 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7150 MONO_RGCTX_INFO_KLASS);
7152 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7156 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7157 MonoMethod *mono_castclass;
7158 MonoInst *iargs [1];
7161 mono_castclass = mono_marshal_get_castclass (klass);
7164 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7165 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7166 g_assert (costs > 0);
7169 cfg->real_offset += 5;
7174 inline_costs += costs;
7177 ins = handle_castclass (cfg, klass, *sp);
7187 token = read32 (ip + 1);
7188 klass = mini_get_class (method, token, generic_context);
7189 CHECK_TYPELOAD (klass);
7190 if (sp [0]->type != STACK_OBJ)
7193 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
7194 GENERIC_SHARING_FAILURE (CEE_ISINST);
7196 if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7198 MonoMethod *mono_isinst;
7199 MonoInst *iargs [1];
7202 mono_isinst = mono_marshal_get_isinst (klass);
7205 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7206 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7207 g_assert (costs > 0);
7210 cfg->real_offset += 5;
7215 inline_costs += costs;
7218 ins = handle_isinst (cfg, klass, *sp);
7224 case CEE_UNBOX_ANY: {
7225 MonoInst *rgctx = NULL;
7230 token = read32 (ip + 1);
7231 klass = mini_get_class (method, token, generic_context);
7232 CHECK_TYPELOAD (klass);
7234 if (cfg->generic_sharing_context)
7235 context_used = mono_class_check_context_used (klass);
7237 if (generic_class_is_reference_type (cfg, klass)) {
7240 MonoInst *iargs [2];
7242 g_assert (!method->klass->valuetype);
7247 EMIT_GET_RGCTX (rgctx, context_used);
7248 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7249 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7253 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7254 MonoMethod *mono_castclass;
7255 MonoInst *iargs [1];
7258 mono_castclass = mono_marshal_get_castclass (klass);
7261 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7262 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7264 g_assert (costs > 0);
7267 cfg->real_offset += 5;
7271 inline_costs += costs;
7273 ins = handle_castclass (cfg, klass, *sp);
7282 EMIT_GET_RGCTX (rgctx, context_used);
7284 if (mono_class_is_nullable (klass)) {
7285 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7292 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7298 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7306 int context_used = 0;
7312 token = read32 (ip + 1);
7313 klass = mini_get_class (method, token, generic_context);
7314 CHECK_TYPELOAD (klass);
7316 if (cfg->generic_sharing_context) {
7317 context_used = mono_class_check_context_used (klass);
7319 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
7320 GENERIC_SHARING_FAILURE (*ip);
7323 if (generic_class_is_reference_type (cfg, klass)) {
7329 if (klass == mono_defaults.void_class)
7331 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7333 /* frequent check in generic code: box (struct), brtrue */
7334 if (!mono_class_is_nullable (klass) &&
7335 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7336 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7338 MONO_INST_NEW (cfg, ins, OP_BR);
7339 if (*ip == CEE_BRTRUE_S) {
7342 target = ip + 1 + (signed char)(*ip);
7347 target = ip + 4 + (gint)(read32 (ip));
7350 GET_BBLOCK (cfg, tblock, target);
7351 link_bblock (cfg, bblock, tblock);
7352 CHECK_BBLOCK (target, ip, tblock);
7353 ins->inst_target_bb = tblock;
7354 GET_BBLOCK (cfg, tblock, ip);
7356 * This leads to some inconsistency, since the two bblocks are not
7357 * really connected, but it is needed for handling stack arguments
7358 * correctly (see test_0_box_brtrue_opt_regress_81102).
7360 link_bblock (cfg, bblock, tblock);
7361 if (sp != stack_start) {
7362 handle_stack_args (cfg, stack_start, sp - stack_start);
7364 CHECK_UNVERIFIABLE (cfg);
7366 MONO_ADD_INS (bblock, ins);
7367 start_new_bblock = 1;
7374 if (mono_class_is_nullable (klass)) {
7375 GENERIC_SHARING_FAILURE (CEE_BOX);
7380 EMIT_GET_RGCTX (rgctx, context_used);
7381 if (cfg->opt & MONO_OPT_SHARED)
7382 rgctx_info = MONO_RGCTX_INFO_KLASS;
7384 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7385 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7386 *sp++ = handle_box_from_inst (cfg, val, klass, data);
7389 *sp++ = handle_box (cfg, val, klass);
7397 MonoInst *rgctx = NULL;
7402 token = read32 (ip + 1);
7403 klass = mini_get_class (method, token, generic_context);
7404 CHECK_TYPELOAD (klass);
7406 if (cfg->generic_sharing_context)
7407 context_used = mono_class_check_context_used (klass);
7410 EMIT_GET_RGCTX (rgctx, context_used);
7412 if (mono_class_is_nullable (klass)) {
7415 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7416 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7420 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7430 MonoClassField *field;
7434 if (*ip == CEE_STFLD) {
7441 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7443 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7446 token = read32 (ip + 1);
7447 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7448 field = mono_method_get_wrapper_data (method, token);
7449 klass = field->parent;
7452 field = mono_field_from_token (image, token, &klass, generic_context);
7456 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7457 FIELD_ACCESS_FAILURE;
7458 mono_class_init (klass);
7460 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7461 if (*ip == CEE_STFLD) {
7462 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7464 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7465 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7466 MonoInst *iargs [5];
7469 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7470 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7471 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7475 if (cfg->opt & MONO_OPT_INLINE) {
7476 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7477 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7478 g_assert (costs > 0);
7481 cfg->real_offset += 5;
7484 inline_costs += costs;
7487 mono_emit_method_call (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper), iargs, NULL);
7492 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7494 store->flags |= ins_flag;
7501 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7502 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7503 MonoInst *iargs [4];
7506 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7507 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7508 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7509 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7510 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7511 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7513 g_assert (costs > 0);
7516 cfg->real_offset += 5;
7520 inline_costs += costs;
7523 ins = mono_emit_method_call (cfg, wrapper, mono_method_signature (wrapper), iargs, NULL);
7527 if (sp [0]->type == STACK_VTYPE) {
7530 /* Have to compute the address of the variable */
7532 var = cfg->vreg_to_inst [sp [0]->dreg];
7534 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7536 g_assert (var->klass == klass);
7538 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7542 if (*ip == CEE_LDFLDA) {
7543 dreg = alloc_preg (cfg);
7545 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7546 ins->klass = mono_class_from_mono_type (field->type);
7547 ins->type = STACK_MP;
7552 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7553 load->flags |= ins_flag;
7564 MonoClassField *field;
7565 gpointer addr = NULL;
7566 gboolean is_special_static;
7569 token = read32 (ip + 1);
7571 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7572 field = mono_method_get_wrapper_data (method, token);
7573 klass = field->parent;
7576 field = mono_field_from_token (image, token, &klass, generic_context);
7579 mono_class_init (klass);
7580 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7581 FIELD_ACCESS_FAILURE;
7584 * We can only support shared generic static
7585 * field access on architectures where the
7586 * trampoline code has been extended to handle
7587 * the generic class init.
7589 #ifndef MONO_ARCH_VTABLE_REG
7590 GENERIC_SHARING_FAILURE (*ip);
7593 if (cfg->generic_sharing_context)
7594 context_used = mono_class_check_context_used (klass);
7596 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7598 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7599 * to be called here.
7601 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7602 mono_class_vtable (cfg->domain, klass);
7603 CHECK_TYPELOAD (klass);
7605 mono_domain_lock (cfg->domain);
7606 if (cfg->domain->special_static_fields)
7607 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7608 mono_domain_unlock (cfg->domain);
7610 is_special_static = mono_class_field_is_special_static (field);
7612 /* Generate IR to compute the field address */
7614 if ((cfg->opt & MONO_OPT_SHARED) ||
7615 (cfg->compile_aot && is_special_static) ||
7616 (context_used && is_special_static)) {
7617 MonoInst *iargs [2];
7619 g_assert (field->parent);
7620 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7624 EMIT_GET_RGCTX (rgctx, context_used);
7625 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7627 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7629 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7630 } else if (context_used) {
7631 MonoInst *rgctx, *static_data;
7634 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7635 method->klass->name_space, method->klass->name, method->name,
7636 depth, field->offset);
7639 if (mono_class_needs_cctor_run (klass, method)) {
7641 MonoInst *vtable, *rgctx;
7643 EMIT_GET_RGCTX (rgctx, context_used);
7644 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7646 // FIXME: This doesn't work since it tries to pass the argument
7647 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7649 * The vtable pointer is always passed in a register regardless of
7650 * the calling convention, so assign it manually, and make a call
7651 * using a signature without parameters.
7653 call = (MonoCallInst*)mono_emit_native_call (cfg, mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT), helper_sig_generic_class_init_trampoline, &vtable);
7654 #ifdef MONO_ARCH_VTABLE_REG
7655 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7662 * The pointer we're computing here is
7664 * super_info.static_data + field->offset
7666 EMIT_GET_RGCTX (rgctx, context_used);
7667 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7669 if (field->offset == 0) {
7672 int addr_reg = mono_alloc_preg (cfg);
7673 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7675 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7676 MonoInst *iargs [2];
7678 g_assert (field->parent);
7679 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7680 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7681 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7683 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7685 CHECK_TYPELOAD (klass);
7687 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7688 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7689 mono_emit_native_call (cfg, tramp,
7690 helper_sig_class_init_trampoline,
7692 if (cfg->verbose_level > 2)
7693 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7694 class_inits = g_slist_prepend (class_inits, vtable);
7696 if (cfg->run_cctors) {
7698 /* This makes it so that inlining cannot trigger */
7699 /* .cctors: too many apps depend on them */
7700 /* running with a specific order... */
7701 if (! vtable->initialized)
7703 ex = mono_runtime_class_init_full (vtable, FALSE);
7705 set_exception_object (cfg, ex);
7706 goto exception_exit;
7710 addr = (char*)vtable->data + field->offset;
7712 if (cfg->compile_aot)
7713 EMIT_NEW_SFLDACONST (cfg, ins, field);
7715 EMIT_NEW_PCONST (cfg, ins, addr);
7718 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7719 * This could be later optimized to do just a couple of
7720 * memory dereferences with constant offsets.
7722 MonoInst *iargs [1];
7723 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7724 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7728 /* Generate IR to do the actual load/store operation */
7730 if (*ip == CEE_LDSFLDA) {
7731 ins->klass = mono_class_from_mono_type (field->type);
7733 } else if (*ip == CEE_STSFLD) {
7738 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7739 store->flags |= ins_flag;
7742 gboolean is_const = FALSE;
7743 MonoVTable *vtable = NULL;
7745 if (!context_used) {
7746 vtable = mono_class_vtable (cfg->domain, klass);
7747 CHECK_TYPELOAD (klass);
7749 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7750 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7751 gpointer addr = (char*)vtable->data + field->offset;
7752 int ro_type = field->type->type;
7753 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7754 ro_type = field->type->data.klass->enum_basetype->type;
7756 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7759 case MONO_TYPE_BOOLEAN:
7761 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7765 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7768 case MONO_TYPE_CHAR:
7770 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7774 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7779 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7783 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7788 case MONO_TYPE_STRING:
7789 case MONO_TYPE_OBJECT:
7790 case MONO_TYPE_CLASS:
7791 case MONO_TYPE_SZARRAY:
7793 case MONO_TYPE_FNPTR:
7794 case MONO_TYPE_ARRAY:
7795 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7796 type_to_eval_stack_type ((cfg), field->type, *sp);
7801 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7806 case MONO_TYPE_VALUETYPE:
7816 CHECK_STACK_OVF (1);
7818 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7819 load->flags |= ins_flag;
7823 /* FIXME: don't see why this does not work */
7824 //cfg->disable_aot = TRUE;
7834 token = read32 (ip + 1);
7835 klass = mini_get_class (method, token, generic_context);
7836 CHECK_TYPELOAD (klass);
7837 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7838 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7849 const char *data_ptr;
7856 token = read32 (ip + 1);
7858 klass = mini_get_class (method, token, generic_context);
7859 CHECK_TYPELOAD (klass);
7861 if (cfg->generic_sharing_context)
7862 context_used = mono_class_check_context_used (klass);
7868 /* FIXME: Decompose later to help abcrem */
7871 EMIT_GET_RGCTX (rgctx, context_used);
7872 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7877 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7879 if (cfg->opt & MONO_OPT_SHARED) {
7880 /* Decompose now to avoid problems with references to the domainvar */
7881 MonoInst *iargs [3];
7883 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7884 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7887 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7889 /* Decompose later since it is needed by abcrem */
7890 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7891 ins->dreg = alloc_preg (cfg);
7892 ins->sreg1 = sp [0]->dreg;
7893 ins->inst_newa_class = klass;
7894 ins->type = STACK_OBJ;
7896 MONO_ADD_INS (cfg->cbb, ins);
7897 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7898 cfg->cbb->has_array_access = TRUE;
7900 /* Needed so mono_emit_load_get_addr () gets called */
7901 mono_get_got_var (cfg);
7911 * we inline/optimize the initialization sequence if possible.
7912 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7913 * for small sizes open code the memcpy
7914 * ensure the rva field is big enough
7916 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7917 MonoMethod *memcpy_method = get_memcpy_method ();
7918 MonoInst *iargs [3];
7919 int add_reg = alloc_preg (cfg);
7921 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7922 if (cfg->compile_aot) {
7923 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7925 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7927 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7928 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
7937 if (sp [0]->type != STACK_OBJ)
7940 dreg = alloc_preg (cfg);
7941 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7942 ins->dreg = alloc_preg (cfg);
7943 ins->sreg1 = sp [0]->dreg;
7944 ins->type = STACK_I4;
7945 MONO_ADD_INS (cfg->cbb, ins);
7946 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7947 cfg->cbb->has_array_access = TRUE;
7955 if (sp [0]->type != STACK_OBJ)
7958 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7960 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7961 CHECK_TYPELOAD (klass);
7962 /* we need to make sure that this array is exactly the type it needs
7963 * to be for correctness. the wrappers are lax with their usage
7964 * so we need to ignore them here
7966 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7967 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7970 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7974 case CEE_LDELEM_ANY:
7985 case CEE_LDELEM_REF: {
7991 if (*ip == CEE_LDELEM_ANY) {
7993 token = read32 (ip + 1);
7994 klass = mini_get_class (method, token, generic_context);
7995 CHECK_TYPELOAD (klass);
7996 mono_class_init (klass);
7999 klass = array_access_to_klass (*ip);
8001 if (sp [0]->type != STACK_OBJ)
8004 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8006 if (sp [1]->opcode == OP_ICONST) {
8007 int array_reg = sp [0]->dreg;
8008 int index_reg = sp [1]->dreg;
8009 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8011 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8012 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8014 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8015 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8018 if (*ip == CEE_LDELEM_ANY)
8031 case CEE_STELEM_REF:
8032 case CEE_STELEM_ANY: {
8038 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8040 if (*ip == CEE_STELEM_ANY) {
8042 token = read32 (ip + 1);
8043 klass = mini_get_class (method, token, generic_context);
8044 CHECK_TYPELOAD (klass);
8045 mono_class_init (klass);
8048 klass = array_access_to_klass (*ip);
8050 if (sp [0]->type != STACK_OBJ)
8053 /* storing a NULL doesn't need any of the complex checks in stelemref */
8054 if (generic_class_is_reference_type (cfg, klass) &&
8055 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8056 MonoMethod* helper = mono_marshal_get_stelemref ();
8057 MonoInst *iargs [3];
8059 if (sp [0]->type != STACK_OBJ)
8061 if (sp [2]->type != STACK_OBJ)
8068 mono_emit_method_call (cfg, helper, mono_method_signature (helper), iargs, NULL);
8070 if (sp [1]->opcode == OP_ICONST) {
8071 int array_reg = sp [0]->dreg;
8072 int index_reg = sp [1]->dreg;
8073 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8075 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8076 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8078 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8079 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8083 if (*ip == CEE_STELEM_ANY)
8090 case CEE_CKFINITE: {
8094 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8095 ins->sreg1 = sp [0]->dreg;
8096 ins->dreg = alloc_freg (cfg);
8097 ins->type = STACK_R8;
8098 MONO_ADD_INS (bblock, ins);
8104 case CEE_REFANYVAL: {
8105 MonoInst *src_var, *src;
8106 int context_used = 0;
8108 int klass_reg = alloc_preg (cfg);
8109 int dreg = alloc_preg (cfg);
8112 MONO_INST_NEW (cfg, ins, *ip);
8115 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8116 CHECK_TYPELOAD (klass);
8117 mono_class_init (klass);
8119 if (cfg->generic_sharing_context) {
8120 context_used = mono_class_check_context_used (klass);
8121 if (context_used && cfg->compile_aot)
8122 GENERIC_SHARING_FAILURE (*ip);
8127 GENERIC_SHARING_FAILURE (*ip);
8130 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8132 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8133 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8135 mini_emit_class_check (cfg, klass_reg, klass);
8136 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8138 ins->type = STACK_MP;
8143 case CEE_MKREFANY: {
8144 MonoInst *loc, *addr;
8145 int context_used = 0;
8148 MONO_INST_NEW (cfg, ins, *ip);
8151 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8152 CHECK_TYPELOAD (klass);
8153 mono_class_init (klass);
8155 if (cfg->generic_sharing_context) {
8156 context_used = mono_class_check_context_used (klass);
8157 if (context_used && cfg->compile_aot)
8158 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8161 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8162 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8165 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8166 } else if (cfg->compile_aot) {
8167 int const_reg = alloc_preg (cfg);
8168 int type_reg = alloc_preg (cfg);
8170 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8171 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8172 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8173 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8175 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8176 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8178 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8180 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8181 ins->type = STACK_VTYPE;
8182 ins->klass = mono_defaults.typed_reference_class;
8189 MonoClass *handle_class;
8190 int context_used = 0;
8192 CHECK_STACK_OVF (1);
8195 n = read32 (ip + 1);
8197 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8198 handle = mono_method_get_wrapper_data (method, n);
8199 handle_class = mono_method_get_wrapper_data (method, n + 1);
8200 if (handle_class == mono_defaults.typehandle_class)
8201 handle = &((MonoClass*)handle)->byval_arg;
8204 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8208 mono_class_init (handle_class);
8209 if (cfg->generic_sharing_context) {
8210 if (handle_class == mono_defaults.typehandle_class) {
8211 /* If we get a MONO_TYPE_CLASS
8212 then we need to provide the
8214 instantiation of it. */
8215 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8218 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8219 } else if (handle_class == mono_defaults.fieldhandle_class)
8220 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8221 else if (handle_class == mono_defaults.methodhandle_class)
8222 context_used = mono_method_check_context_used (handle);
8224 g_assert_not_reached ();
8227 if (cfg->opt & MONO_OPT_SHARED) {
8228 MonoInst *addr, *vtvar, *iargs [3];
8229 int method_context_used;
8231 if (cfg->generic_sharing_context)
8232 method_context_used = mono_method_check_context_used (method);
8234 method_context_used = 0;
8236 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8238 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8239 EMIT_NEW_ICONST (cfg, iargs [1], n);
8240 if (method_context_used) {
8243 EMIT_GET_RGCTX (rgctx, method_context_used);
8244 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8245 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8247 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8248 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8250 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8252 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8254 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8256 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8257 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8258 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8259 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8260 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8261 MonoClass *tclass = mono_class_from_mono_type (handle);
8263 mono_class_init (tclass);
8267 g_assert (!cfg->compile_aot);
8268 EMIT_GET_RGCTX (rgctx, context_used);
8269 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8270 } else if (cfg->compile_aot) {
8272 * FIXME: We would have to include the context into the
8273 * aot constant too (tests/generic-array-type.2.exe).
8275 if (generic_context)
8276 cfg->disable_aot = TRUE;
8277 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8279 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8281 ins->type = STACK_OBJ;
8282 ins->klass = cmethod->klass;
8285 MonoInst *addr, *vtvar;
8287 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8292 g_assert (!cfg->compile_aot);
8294 EMIT_GET_RGCTX (rgctx, context_used);
8295 if (handle_class == mono_defaults.typehandle_class) {
8296 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8297 mono_class_from_mono_type (handle),
8298 MONO_RGCTX_INFO_TYPE);
8299 } else if (handle_class == mono_defaults.methodhandle_class) {
8300 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8301 handle, MONO_RGCTX_INFO_METHOD);
8302 } else if (handle_class == mono_defaults.fieldhandle_class) {
8303 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8304 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8306 g_assert_not_reached ();
8308 } else if (cfg->compile_aot) {
8309 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8311 EMIT_NEW_PCONST (cfg, ins, handle);
8313 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8314 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8315 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8325 MONO_INST_NEW (cfg, ins, OP_THROW);
8327 ins->sreg1 = sp [0]->dreg;
8329 bblock->out_of_line = TRUE;
8330 MONO_ADD_INS (bblock, ins);
8331 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8332 MONO_ADD_INS (bblock, ins);
8335 link_bblock (cfg, bblock, end_bblock);
8336 start_new_bblock = 1;
8338 case CEE_ENDFINALLY:
8339 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8340 MONO_ADD_INS (bblock, ins);
8342 start_new_bblock = 1;
8345 * Control will leave the method so empty the stack, otherwise
8346 * the next basic block will start with a nonempty stack.
8348 while (sp != stack_start) {
8356 if (*ip == CEE_LEAVE) {
8358 target = ip + 5 + (gint32)read32(ip + 1);
8361 target = ip + 2 + (signed char)(ip [1]);
8364 /* empty the stack */
8365 while (sp != stack_start) {
8370 * If this leave statement is in a catch block, check for a
8371 * pending exception, and rethrow it if necessary.
8373 for (i = 0; i < header->num_clauses; ++i) {
8374 MonoExceptionClause *clause = &header->clauses [i];
8377 * Use <= in the final comparison to handle clauses with multiple
8378 * leave statements, like in bug #78024.
8379 * The ordering of the exception clauses guarantees that we find the
8382 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8384 MonoBasicBlock *dont_throw;
8389 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8392 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8394 NEW_BBLOCK (cfg, dont_throw);
8397 * Currently, we allways rethrow the abort exception, despite the
8398 * fact that this is not correct. See thread6.cs for an example.
8399 * But propagating the abort exception is more important than
8400 * getting the sematics right.
8402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8404 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8406 MONO_START_BB (cfg, dont_throw);
8411 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8413 for (tmp = handlers; tmp; tmp = tmp->next) {
8415 link_bblock (cfg, bblock, tblock);
8416 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8417 ins->inst_target_bb = tblock;
8418 MONO_ADD_INS (bblock, ins);
8420 g_list_free (handlers);
8423 MONO_INST_NEW (cfg, ins, OP_BR);
8424 MONO_ADD_INS (bblock, ins);
8425 GET_BBLOCK (cfg, tblock, target);
8426 link_bblock (cfg, bblock, tblock);
8427 CHECK_BBLOCK (target, ip, tblock);
8428 ins->inst_target_bb = tblock;
8429 start_new_bblock = 1;
8431 if (*ip == CEE_LEAVE)
8440 * Mono specific opcodes
8442 case MONO_CUSTOM_PREFIX: {
8444 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8448 case CEE_MONO_ICALL: {
8450 MonoJitICallInfo *info;
8452 token = read32 (ip + 2);
8453 func = mono_method_get_wrapper_data (method, token);
8454 info = mono_find_jit_icall_by_addr (func);
8457 CHECK_STACK (info->sig->param_count);
8458 sp -= info->sig->param_count;
8460 ins = mono_emit_jit_icall (cfg, info->func, sp);
8461 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8465 inline_costs += 10 * num_calls++;
8469 case CEE_MONO_LDPTR: {
8472 CHECK_STACK_OVF (1);
8474 token = read32 (ip + 2);
8476 ptr = mono_method_get_wrapper_data (method, token);
8477 if (cfg->compile_aot && (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE)) {
8478 MonoMethod *wrapped = mono_marshal_method_from_wrapper (cfg->method);
8480 if (wrapped && ptr != NULL && mono_lookup_internal_call (wrapped) == ptr) {
8481 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, wrapped);
8487 if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8488 MonoJitICallInfo *callinfo;
8489 const char *icall_name;
8491 icall_name = method->name + strlen ("__icall_wrapper_");
8492 g_assert (icall_name);
8493 callinfo = mono_find_jit_icall_by_name (icall_name);
8494 g_assert (callinfo);
8496 if (ptr == callinfo->func) {
8497 /* Will be transformed into an AOTCONST later */
8498 EMIT_NEW_PCONST (cfg, ins, ptr);
8505 /* FIXME: Generalize this */
8506 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8507 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8512 EMIT_NEW_PCONST (cfg, ins, ptr);
8515 inline_costs += 10 * num_calls++;
8516 /* Can't embed random pointers into AOT code */
8517 cfg->disable_aot = 1;
8520 case CEE_MONO_VTADDR: {
8521 MonoInst *src_var, *src;
8527 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8528 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8533 case CEE_MONO_NEWOBJ: {
8534 MonoInst *iargs [2];
8536 CHECK_STACK_OVF (1);
8538 token = read32 (ip + 2);
8539 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8540 mono_class_init (klass);
8541 NEW_DOMAINCONST (cfg, iargs [0]);
8542 MONO_ADD_INS (cfg->cbb, iargs [0]);
8543 NEW_CLASSCONST (cfg, iargs [1], klass);
8544 MONO_ADD_INS (cfg->cbb, iargs [1]);
8545 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8547 inline_costs += 10 * num_calls++;
8550 case CEE_MONO_OBJADDR:
8553 MONO_INST_NEW (cfg, ins, OP_MOVE);
8554 ins->dreg = alloc_preg (cfg);
8555 ins->sreg1 = sp [0]->dreg;
8556 ins->type = STACK_MP;
8557 MONO_ADD_INS (cfg->cbb, ins);
8561 case CEE_MONO_LDNATIVEOBJ:
8563 * Similar to LDOBJ, but instead load the unmanaged
8564 * representation of the vtype to the stack.
8569 token = read32 (ip + 2);
8570 klass = mono_method_get_wrapper_data (method, token);
8571 g_assert (klass->valuetype);
8572 mono_class_init (klass);
8575 MonoInst *src, *dest, *temp;
8578 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8579 temp->backend.is_pinvoke = 1;
8580 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8581 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8583 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8584 dest->type = STACK_VTYPE;
8585 dest->klass = klass;
8591 case CEE_MONO_RETOBJ: {
8593 * Same as RET, but return the native representation of a vtype
8596 g_assert (cfg->ret);
8597 g_assert (mono_method_signature (method)->pinvoke);
8602 token = read32 (ip + 2);
8603 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8605 if (!cfg->vret_addr) {
8606 g_assert (cfg->ret_var_is_local);
8608 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8610 EMIT_NEW_RETLOADA (cfg, ins);
8612 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8614 if (sp != stack_start)
8617 MONO_INST_NEW (cfg, ins, OP_BR);
8618 ins->inst_target_bb = end_bblock;
8619 MONO_ADD_INS (bblock, ins);
8620 link_bblock (cfg, bblock, end_bblock);
8621 start_new_bblock = 1;
8625 case CEE_MONO_CISINST:
8626 case CEE_MONO_CCASTCLASS: {
8631 token = read32 (ip + 2);
8632 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8633 if (ip [1] == CEE_MONO_CISINST)
8634 ins = handle_cisinst (cfg, klass, sp [0]);
8636 ins = handle_ccastclass (cfg, klass, sp [0]);
8642 case CEE_MONO_SAVE_LMF:
8643 case CEE_MONO_RESTORE_LMF:
8644 #ifdef MONO_ARCH_HAVE_LMF_OPS
8645 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8646 MONO_ADD_INS (bblock, ins);
8647 cfg->need_lmf_area = TRUE;
8651 case CEE_MONO_CLASSCONST:
8652 CHECK_STACK_OVF (1);
8654 token = read32 (ip + 2);
8655 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8658 inline_costs += 10 * num_calls++;
8660 case CEE_MONO_NOT_TAKEN:
8661 bblock->out_of_line = TRUE;
8665 CHECK_STACK_OVF (1);
8667 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8668 ins->dreg = alloc_preg (cfg);
8669 ins->inst_offset = (gint32)read32 (ip + 2);
8670 ins->type = STACK_PTR;
8671 MONO_ADD_INS (bblock, ins);
8676 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8686 /* somewhat similar to LDTOKEN */
8687 MonoInst *addr, *vtvar;
8688 CHECK_STACK_OVF (1);
8689 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8691 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8692 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8694 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8695 ins->type = STACK_VTYPE;
8696 ins->klass = mono_defaults.argumenthandle_class;
8709 * The following transforms:
8710 * CEE_CEQ into OP_CEQ
8711 * CEE_CGT into OP_CGT
8712 * CEE_CGT_UN into OP_CGT_UN
8713 * CEE_CLT into OP_CLT
8714 * CEE_CLT_UN into OP_CLT_UN
8716 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8718 MONO_INST_NEW (cfg, ins, cmp->opcode);
8720 cmp->sreg1 = sp [0]->dreg;
8721 cmp->sreg2 = sp [1]->dreg;
8722 type_from_op (cmp, sp [0], sp [1]);
8724 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8725 cmp->opcode = OP_LCOMPARE;
8726 else if (sp [0]->type == STACK_R8)
8727 cmp->opcode = OP_FCOMPARE;
8729 cmp->opcode = OP_ICOMPARE;
8730 MONO_ADD_INS (bblock, cmp);
8731 ins->type = STACK_I4;
8732 ins->dreg = alloc_dreg (cfg, ins->type);
8733 type_from_op (ins, sp [0], sp [1]);
8735 if (cmp->opcode == OP_FCOMPARE) {
8737 * The backends expect the fceq opcodes to do the
8740 cmp->opcode = OP_NOP;
8741 ins->sreg1 = cmp->sreg1;
8742 ins->sreg2 = cmp->sreg2;
8744 MONO_ADD_INS (bblock, ins);
8751 MonoMethod *cil_method, *ctor_method;
8752 gboolean is_shared = FALSE;
8754 CHECK_STACK_OVF (1);
8756 n = read32 (ip + 2);
8757 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8760 mono_class_init (cmethod->klass);
8762 if (cfg->generic_sharing_context)
8763 context_used = mono_method_check_context_used (cmethod);
8765 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8766 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8767 (cmethod->klass->generic_class ||
8768 cmethod->klass->generic_container)) {
8771 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8775 cil_method = cmethod;
8776 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8777 METHOD_ACCESS_FAILURE;
8779 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8780 if (check_linkdemand (cfg, method, cmethod))
8782 CHECK_CFG_EXCEPTION;
8783 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8784 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8788 * Optimize the common case of ldftn+delegate creation
8790 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8791 /* FIXME: SGEN support */
8792 /* FIXME: handle shared static generic methods */
8793 /* FIXME: handle this in shared code */
8794 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8795 MonoInst *target_ins;
8798 if (cfg->verbose_level > 3)
8799 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8800 target_ins = sp [-1];
8802 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8813 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8815 EMIT_GET_RGCTX (rgctx, context_used);
8816 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8817 } else if (is_shared) {
8818 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8820 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8822 if (method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED)
8823 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8825 ins = mono_emit_jit_icall (cfg, mono_ldftn_nosync, &argconst);
8829 inline_costs += 10 * num_calls++;
8832 case CEE_LDVIRTFTN: {
8837 n = read32 (ip + 2);
8838 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8841 mono_class_init (cmethod->klass);
8843 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8844 GENERIC_SHARING_FAILURE (CEE_LDVIRTFTN);
8846 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8847 if (check_linkdemand (cfg, method, cmethod))
8849 CHECK_CFG_EXCEPTION;
8850 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8851 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8856 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8857 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8860 inline_costs += 10 * num_calls++;
8864 CHECK_STACK_OVF (1);
8866 n = read16 (ip + 2);
8868 EMIT_NEW_ARGLOAD (cfg, ins, n);
8873 CHECK_STACK_OVF (1);
8875 n = read16 (ip + 2);
8877 NEW_ARGLOADA (cfg, ins, n);
8878 MONO_ADD_INS (cfg->cbb, ins);
8886 n = read16 (ip + 2);
8888 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8890 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8894 CHECK_STACK_OVF (1);
8896 n = read16 (ip + 2);
8898 EMIT_NEW_LOCLOAD (cfg, ins, n);
8903 CHECK_STACK_OVF (1);
8905 n = read16 (ip + 2);
8907 EMIT_NEW_LOCLOADA (cfg, ins, n);
8915 n = read16 (ip + 2);
8917 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8919 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8926 if (sp != stack_start)
8928 if (cfg->method != method)
8930 * Inlining this into a loop in a parent could lead to
8931 * stack overflows which is different behavior than the
8932 * non-inlined case, thus disable inlining in this case.
8934 goto inline_failure;
8936 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8937 ins->dreg = alloc_preg (cfg);
8938 ins->sreg1 = sp [0]->dreg;
8939 ins->type = STACK_PTR;
8940 MONO_ADD_INS (cfg->cbb, ins);
8942 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8943 if (header->init_locals)
8944 ins->flags |= MONO_INST_INIT;
8949 case CEE_ENDFILTER: {
8950 MonoExceptionClause *clause, *nearest;
8951 int cc, nearest_num;
8955 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8957 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8958 ins->sreg1 = (*sp)->dreg;
8959 MONO_ADD_INS (bblock, ins);
8960 start_new_bblock = 1;
8965 for (cc = 0; cc < header->num_clauses; ++cc) {
8966 clause = &header->clauses [cc];
8967 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8968 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8969 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8975 if ((ip - header->code) != nearest->handler_offset)
8980 case CEE_UNALIGNED_:
8981 ins_flag |= MONO_INST_UNALIGNED;
8982 /* FIXME: record alignment? we can assume 1 for now */
8987 ins_flag |= MONO_INST_VOLATILE;
8991 ins_flag |= MONO_INST_TAILCALL;
8992 cfg->flags |= MONO_CFG_HAS_TAIL;
8993 /* Can't inline tail calls at this time */
8994 inline_costs += 100000;
9001 token = read32 (ip + 2);
9002 klass = mini_get_class (method, token, generic_context);
9003 CHECK_TYPELOAD (klass);
9004 if (generic_class_is_reference_type (cfg, klass)) {
9005 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9007 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9008 mini_emit_initobj (cfg, *sp, NULL, klass);
9013 case CEE_CONSTRAINED_:
9015 token = read32 (ip + 2);
9016 constrained_call = mono_class_get_full (image, token, generic_context);
9017 CHECK_TYPELOAD (constrained_call);
9022 MonoInst *iargs [3];
9026 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9027 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9028 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9029 /* emit_memset only works when val == 0 */
9030 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9035 if (ip [1] == CEE_CPBLK) {
9036 MonoMethod *memcpy_method = get_memcpy_method ();
9037 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
9039 MonoMethod *memset_method = get_memset_method ();
9040 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
9050 ins_flag |= MONO_INST_NOTYPECHECK;
9052 ins_flag |= MONO_INST_NORANGECHECK;
9053 /* we ignore the no-nullcheck for now since we
9054 * really do it explicitly only when doing callvirt->call
9060 int handler_offset = -1;
9062 for (i = 0; i < header->num_clauses; ++i) {
9063 MonoExceptionClause *clause = &header->clauses [i];
9064 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY))
9065 handler_offset = clause->handler_offset;
9068 bblock->flags |= BB_EXCEPTION_UNSAFE;
9070 g_assert (handler_offset != -1);
9072 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9073 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9074 ins->sreg1 = load->dreg;
9075 MONO_ADD_INS (bblock, ins);
9077 link_bblock (cfg, bblock, end_bblock);
9078 start_new_bblock = 1;
9086 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9088 CHECK_STACK_OVF (1);
9090 token = read32 (ip + 2);
9091 /* FIXXME: handle generics. */
9092 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9093 MonoType *type = mono_type_create_from_typespec (image, token);
9094 token = mono_type_size (type, &ialign);
9096 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9097 CHECK_TYPELOAD (klass);
9098 mono_class_init (klass);
9099 token = mono_class_value_size (klass, &align);
9101 EMIT_NEW_ICONST (cfg, ins, token);
9106 case CEE_REFANYTYPE: {
9107 MonoInst *src_var, *src;
9113 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9115 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9116 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9117 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9127 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9132 g_error ("opcode 0x%02x not handled", *ip);
9135 if (start_new_bblock != 1)
9138 bblock->cil_length = ip - bblock->cil_code;
9139 bblock->next_bb = end_bblock;
9141 if (cfg->method == method && cfg->domainvar) {
9143 MonoInst *get_domain;
9145 cfg->cbb = init_localsbb;
9147 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9148 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9151 get_domain->dreg = alloc_preg (cfg);
9152 MONO_ADD_INS (cfg->cbb, get_domain);
9154 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9155 MONO_ADD_INS (cfg->cbb, store);
9158 if (cfg->method == method && cfg->got_var)
9159 mono_emit_load_got_addr (cfg);
9161 if (header->init_locals) {
9164 cfg->cbb = init_localsbb;
9165 cfg->ip = header->code;
9166 for (i = 0; i < header->num_locals; ++i) {
9167 MonoType *ptype = header->locals [i];
9168 int t = ptype->type;
9169 dreg = cfg->locals [i]->dreg;
9171 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9172 t = ptype->data.klass->enum_basetype->type;
9174 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9175 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9176 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9177 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9178 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9179 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9180 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9181 ins->type = STACK_R8;
9182 ins->inst_p0 = (void*)&r8_0;
9183 ins->dreg = alloc_dreg (cfg, STACK_R8);
9184 MONO_ADD_INS (init_localsbb, ins);
9185 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9186 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9187 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9188 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9190 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9197 /* resolve backward branches in the middle of an existing basic block */
9198 for (tmp = bb_recheck; tmp; tmp = tmp->next) {
9200 /*printf ("need recheck in %s at IL_%04x\n", method->name, bblock->cil_code - header->code);*/
9201 tblock = find_previous (cfg->cil_offset_to_bb, header->code_size, start_bblock, bblock->cil_code);
9202 if (tblock != start_bblock) {
9204 split_bblock (cfg, tblock, bblock);
9205 l = bblock->cil_code - header->code;
9206 bblock->cil_length = tblock->cil_length - l;
9207 tblock->cil_length = l;
9209 printf ("recheck failed.\n");
9213 if (cfg->method == method) {
9215 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9216 bb->region = mono_find_block_region (cfg, bb->real_offset);
9218 mono_create_spvar_for_region (cfg, bb->region);
9219 if (cfg->verbose_level > 2)
9220 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9224 g_slist_free (class_inits);
9225 dont_inline = g_list_remove (dont_inline, method);
9227 if (inline_costs < 0) {
9230 /* Method is too large */
9231 mname = mono_method_full_name (method, TRUE);
9232 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9233 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9238 if ((cfg->verbose_level > 1) && (cfg->method == method))
9239 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9241 return inline_costs;
9244 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9245 g_slist_free (class_inits);
9246 dont_inline = g_list_remove (dont_inline, method);
9250 g_slist_free (class_inits);
9251 dont_inline = g_list_remove (dont_inline, method);
9255 g_slist_free (class_inits);
9256 dont_inline = g_list_remove (dont_inline, method);
9257 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9261 g_slist_free (class_inits);
9262 dont_inline = g_list_remove (dont_inline, method);
9263 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 * Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM variant, so a
 * register store whose source value is known to be a constant can be folded
 * into an immediate store.  Any other opcode aborts via g_assert_not_reached.
 * NOTE(review): this listing is elided -- the switch header, breaks and
 * closing brace are not visible here.
 */
9268 store_membase_reg_to_store_membase_imm (int opcode)
9271 case OP_STORE_MEMBASE_REG:
9272 return OP_STORE_MEMBASE_IMM;
9273 case OP_STOREI1_MEMBASE_REG:
9274 return OP_STOREI1_MEMBASE_IMM;
9275 case OP_STOREI2_MEMBASE_REG:
9276 return OP_STOREI2_MEMBASE_IMM;
9277 case OP_STOREI4_MEMBASE_REG:
9278 return OP_STOREI4_MEMBASE_IMM;
9279 case OP_STOREI8_MEMBASE_REG:
9280 return OP_STOREI8_MEMBASE_IMM;
/* unknown store opcode: no sensible immediate form */
9282 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 * Map a two-register opcode to its register+immediate form (e.g. the
 * visible returns map div/rem/shift/compare/store opcodes to their *_IMM
 * variants), used when one operand is a constant.
 * NOTE(review): heavily elided listing -- most case labels and the
 * surrounding switch are missing; only the return values and a few labels
 * are visible.  x86/amd64-specific mappings are guarded by the #if blocks.
 */
9289 mono_op_to_op_imm (int opcode)
9299 return OP_IDIV_UN_IMM;
9303 return OP_IREM_UN_IMM;
9317 return OP_ISHR_UN_IMM;
9334 return OP_LSHR_UN_IMM;
9337 return OP_COMPARE_IMM;
9339 return OP_ICOMPARE_IMM;
9341 return OP_LCOMPARE_IMM;
9343 case OP_STORE_MEMBASE_REG:
9344 return OP_STORE_MEMBASE_IMM;
9345 case OP_STOREI1_MEMBASE_REG:
9346 return OP_STOREI1_MEMBASE_IMM;
9347 case OP_STOREI2_MEMBASE_REG:
9348 return OP_STOREI2_MEMBASE_IMM;
9349 case OP_STOREI4_MEMBASE_REG:
9350 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only immediate forms */
9352 #if defined(__i386__) || defined (__x86_64__)
9354 return OP_X86_PUSH_IMM;
9355 case OP_X86_COMPARE_MEMBASE_REG:
9356 return OP_X86_COMPARE_MEMBASE_IMM;
9358 #if defined(__x86_64__)
9359 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9360 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9362 case OP_VOIDCALL_REG:
9371 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 * Map a CIL LDIND_* indirect-load opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR opcode.  Aborts on any unrecognized opcode.
 * NOTE(review): the case labels are elided in this listing; the mapping can
 * only be inferred from the returned load opcodes (i1/u1/i2/u2/i4/u4/
 * native/ref/i8/r4/r8).
 */
9378 ldind_to_load_membase (int opcode)
9382 return OP_LOADI1_MEMBASE;
9384 return OP_LOADU1_MEMBASE;
9386 return OP_LOADI2_MEMBASE;
9388 return OP_LOADU2_MEMBASE;
9390 return OP_LOADI4_MEMBASE;
9392 return OP_LOADU4_MEMBASE;
/* native-int and object-ref loads share the pointer-sized load */
9394 return OP_LOAD_MEMBASE;
9396 return OP_LOAD_MEMBASE;
9398 return OP_LOADI8_MEMBASE;
9400 return OP_LOADR4_MEMBASE;
9402 return OP_LOADR8_MEMBASE;
9404 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 * Map a CIL STIND_* indirect-store opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR opcode.  Aborts on any unrecognized opcode.
 * NOTE(review): case labels elided in this listing; mapping inferred from
 * the returned store opcodes.
 */
9411 stind_to_store_membase (int opcode)
9415 return OP_STOREI1_MEMBASE_REG;
9417 return OP_STOREI2_MEMBASE_REG;
9419 return OP_STOREI4_MEMBASE_REG;
9422 return OP_STORE_MEMBASE_REG;
9424 return OP_STOREI8_MEMBASE_REG;
9426 return OP_STORER4_MEMBASE_REG;
9428 return OP_STORER8_MEMBASE_REG;
9430 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 * Map a LOAD*_MEMBASE (base register + offset) opcode to the absolute
 * LOAD*_MEM form, for targets that support loading from a constant
 * address.  Only implemented for x86/amd64; 8-byte loads only on 64-bit.
 * NOTE(review): switch header, some case bodies and the fallthrough/default
 * return are elided in this listing.
 */
9437 mono_load_membase_to_load_mem (int opcode)
9439 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9440 #if defined(__i386__) || defined(__x86_64__)
9442 case OP_LOAD_MEMBASE:
9444 case OP_LOADU1_MEMBASE:
9445 return OP_LOADU1_MEM;
9446 case OP_LOADU2_MEMBASE:
9447 return OP_LOADU2_MEM;
9448 case OP_LOADI4_MEMBASE:
9449 return OP_LOADI4_MEM;
9450 case OP_LOADU4_MEMBASE:
9451 return OP_LOADU4_MEM;
9452 #if SIZEOF_VOID_P == 8
9453 case OP_LOADI8_MEMBASE:
9454 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 * Given an ALU opcode whose destination is stored back to memory with
 * store_opcode, return the x86/amd64 read-modify-write form that operates
 * directly on the memory operand (e.g. ADD -> X86_ADD_MEMBASE_REG), or the
 * elided fallback if no fusion is possible.  Only word-sized stores are
 * eligible (i4/native on x86; additionally i8 on amd64).
 * NOTE(review): case labels, early-return bodies and the default path are
 * elided in this listing; only the guard conditions and returned opcodes
 * are visible.
 */
9463 op_to_op_dest_membase (int store_opcode, int opcode)
9465 #if defined(__i386__)
/* x86: only pointer-sized/int32 stores can be fused */
9466 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9471 return OP_X86_ADD_MEMBASE_REG;
9473 return OP_X86_SUB_MEMBASE_REG;
9475 return OP_X86_AND_MEMBASE_REG;
9477 return OP_X86_OR_MEMBASE_REG;
9479 return OP_X86_XOR_MEMBASE_REG;
9482 return OP_X86_ADD_MEMBASE_IMM;
9485 return OP_X86_SUB_MEMBASE_IMM;
9488 return OP_X86_AND_MEMBASE_IMM;
9491 return OP_X86_OR_MEMBASE_IMM;
9494 return OP_X86_XOR_MEMBASE_IMM;
9500 #if defined(__x86_64__)
/* amd64: i8 stores are also eligible */
9501 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9506 return OP_X86_ADD_MEMBASE_REG;
9508 return OP_X86_SUB_MEMBASE_REG;
9510 return OP_X86_AND_MEMBASE_REG;
9512 return OP_X86_OR_MEMBASE_REG;
9514 return OP_X86_XOR_MEMBASE_REG;
9516 return OP_X86_ADD_MEMBASE_IMM;
9518 return OP_X86_SUB_MEMBASE_IMM;
9520 return OP_X86_AND_MEMBASE_IMM;
9522 return OP_X86_OR_MEMBASE_IMM;
9524 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit wide variants */
9526 return OP_AMD64_ADD_MEMBASE_REG;
9528 return OP_AMD64_SUB_MEMBASE_REG;
9530 return OP_AMD64_AND_MEMBASE_REG;
9532 return OP_AMD64_OR_MEMBASE_REG;
9534 return OP_AMD64_XOR_MEMBASE_REG;
9537 return OP_AMD64_ADD_MEMBASE_IMM;
9540 return OP_AMD64_SUB_MEMBASE_IMM;
9543 return OP_AMD64_AND_MEMBASE_IMM;
9546 return OP_AMD64_OR_MEMBASE_IMM;
9549 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 * Fuse a compare-result opcode with a following byte store: on x86/amd64,
 * a SETcc can write its result directly to memory (X86_SETEQ_MEMBASE /
 * X86_SETNE_MEMBASE) when the store is a 1-byte STOREI1_MEMBASE_REG.
 * NOTE(review): the case labels (presumably the CEQ/CNE-style opcodes) and
 * the default return are elided in this listing.
 */
9559 op_to_op_store_membase (int store_opcode, int opcode)
9561 #if defined(__i386__) || defined(__x86_64__)
9564 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9565 return OP_X86_SETEQ_MEMBASE;
9567 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9568 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 * Fold a preceding memory load (load_opcode) into the first source operand
 * of 'opcode', producing an x86/amd64 opcode that reads the operand
 * straight from memory (push/compare with a MEMBASE operand).  Load width
 * must match the operation width, as the guard conditions show.
 * NOTE(review): arch #if boundaries, some case labels and the default
 * returns are elided in this listing.  The commented-out amd64
 * COMPARE_IMM block (9612-9620) is disabled in the original because it
 * only handles 32 bit immediates.
 */
9576 op_to_op_src1_membase (int load_opcode, int opcode)
9579 /* FIXME: This has sign extension issues */
9581 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9582 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/int32-sized loads can be folded below */
9585 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9590 return OP_X86_PUSH_MEMBASE;
9591 case OP_COMPARE_IMM:
9592 case OP_ICOMPARE_IMM:
9593 return OP_X86_COMPARE_MEMBASE_IMM;
9596 return OP_X86_COMPARE_MEMBASE_REG;
9601 /* FIXME: This has sign extension issues */
9603 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9604 return OP_X86_COMPARE_MEMBASE8_IMM;
9609 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9610 return OP_X86_PUSH_MEMBASE;
9612 /* FIXME: This only works for 32 bit immediates
9613 case OP_COMPARE_IMM:
9614 case OP_LCOMPARE_IMM:
9615 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9616 return OP_AMD64_COMPARE_MEMBASE_IMM;
9618 case OP_ICOMPARE_IMM:
9619 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9620 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9624 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9625 return OP_AMD64_COMPARE_MEMBASE_REG;
9628 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9629 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 * Fold a preceding memory load (load_opcode) into the SECOND source
 * operand of 'opcode', producing an x86/amd64 reg,mem form
 * (e.g. ADD -> X86_ADD_REG_MEMBASE).  Each fold is guarded so the load
 * width matches the operation width (32-bit ops take i4/u4 loads, 64-bit
 * ops take i8/native loads).
 * NOTE(review): arch #if boundaries, case labels and default returns are
 * elided in this listing.
 */
9638 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int32-sized loads can be folded */
9641 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9647 return OP_X86_COMPARE_REG_MEMBASE;
9649 return OP_X86_ADD_REG_MEMBASE;
9651 return OP_X86_SUB_REG_MEMBASE;
9653 return OP_X86_AND_REG_MEMBASE;
9655 return OP_X86_OR_REG_MEMBASE;
9657 return OP_X86_XOR_REG_MEMBASE;
/* amd64: width-checked folds */
9664 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9665 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9669 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9670 return OP_AMD64_COMPARE_REG_MEMBASE;
9673 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9674 return OP_X86_ADD_REG_MEMBASE;
9676 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9677 return OP_X86_SUB_REG_MEMBASE;
9679 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9680 return OP_X86_AND_REG_MEMBASE;
9682 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9683 return OP_X86_OR_REG_MEMBASE;
9685 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9686 return OP_X86_XOR_REG_MEMBASE;
9688 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9689 return OP_AMD64_ADD_REG_MEMBASE;
9691 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9692 return OP_AMD64_SUB_REG_MEMBASE;
9694 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9695 return OP_AMD64_AND_REG_MEMBASE;
9697 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9698 return OP_AMD64_OR_REG_MEMBASE;
9700 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9701 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 * Like mono_op_to_op_imm, but refuse (via the elided early cases guarded by
 * the #ifs) to convert opcodes whose immediate form would have to be
 * software-emulated on this target (long shifts on 32-bit, mul/div when the
 * arch emulates them); everything else is delegated to mono_op_to_op_imm.
 * NOTE(review): the case labels inside both #if blocks are elided in this
 * listing.
 */
9709 mono_op_to_op_imm_noemul (int opcode)
9712 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPTS)
9717 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9725 return mono_op_to_op_imm (opcode);
9730 * mono_handle_global_vregs:
9732 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (as visible in this elided listing):
 *  1) walk every instruction of every bblock, recording for each vreg the
 *     bblock it is used in (vreg_to_bb); a vreg seen in two different
 *     bblocks is made global by creating a variable for it, and marked -1;
 *  2) variables used in only a single bblock are demoted back to local
 *     vregs (flagged MONO_INST_IS_DEAD) when safe;
 *  3) the varinfo/vars tables are compacted to drop the dead entries.
 */
9736 mono_handle_global_vregs (MonoCompile *cfg)
/*
 * NOTE(review): the element size looks wrong -- this allocates
 * sizeof (gint32*) * cfg->next_vreg + 1 bytes, while vreg_to_bb is indexed
 * per-vreg and stores block numbers; presumably
 * sizeof (gint32) * (cfg->next_vreg + 1) was intended.  Harmless
 * over-allocation on LP64, but verify.
 */
9742 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9744 /* Find local vregs used in more than one bb */
9745 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9746 MonoInst *ins = bb->code;
9747 int block_num = bb->block_num;
9749 if (cfg->verbose_level > 1)
9750 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9753 for (; ins; ins = ins->next) {
9754 const char *spec = INS_INFO (ins->opcode);
9755 int regtype, regindex;
9758 if (G_UNLIKELY (cfg->verbose_level > 1))
9759 mono_print_ins (ins);
/* by this point all CIL-level opcodes must have been lowered */
9761 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1 = src1, 2 = src2 */
9763 for (regindex = 0; regindex < 3; regindex ++) {
9766 if (regindex == 0) {
9767 regtype = spec [MONO_INST_DEST];
9771 } else if (regindex == 1) {
9772 regtype = spec [MONO_INST_SRC1];
9777 regtype = spec [MONO_INST_SRC2];
9783 #if SIZEOF_VOID_P == 4
9784 if (regtype == 'l') {
9786 * Since some instructions reference the original long vreg,
9787 * and some reference the two component vregs, it is quite hard
9788 * to determine when it needs to be global. So be conservative.
9790 if (!get_vreg_to_inst (cfg, vreg)) {
9791 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9793 if (cfg->verbose_level > 1)
9794 printf ("LONG VREG R%d made global.\n", vreg);
9798 * Make the component vregs volatile since the optimizations can
9799 * get confused otherwise.
9801 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9802 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9806 g_assert (vreg != -1);
9808 prev_bb = vreg_to_bb [vreg];
9810 /* 0 is a valid block num */
9811 vreg_to_bb [vreg] = block_num + 1;
9812 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are handled by the register allocator, skip them */
9813 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9816 if (!get_vreg_to_inst (cfg, vreg)) {
9817 if (G_UNLIKELY (cfg->verbose_level > 1))
9818 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9822 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9825 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9828 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9831 g_assert_not_reached ();
9835 /* Flag as having been used in more than one bb */
9836 vreg_to_bb [vreg] = -1;
9842 /* If a variable is used in only one bblock, convert it into a local vreg */
9843 for (i = 0; i < cfg->num_varinfo; i++) {
9844 MonoInst *var = cfg->varinfo [i];
9845 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9847 switch (var->type) {
9853 #if SIZEOF_VOID_P == 8
9856 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9857 /* Enabling this screws up the fp stack on x86 */
9860 /* Arguments are implicitly global */
9861 /* Putting R4 vars into registers doesn't work currently */
9862 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9864 * Make that the variable's liveness interval doesn't contain a call, since
9865 * that would cause the lvreg to be spilled, making the whole optimization
9868 /* This is too slow for JIT compilation */
9870 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9872 int def_index, call_index, ins_index;
9873 gboolean spilled = FALSE;
9878 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9879 const char *spec = INS_INFO (ins->opcode);
9881 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9882 def_index = ins_index;
/*
 * NOTE(review): both sides of this || test SRC1/sreg1 -- the second
 * condition is identical to the first and looks like a copy-paste bug;
 * presumably SRC2/sreg2 was intended.  Verify against upstream Mono.
 */
9884 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9885 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9886 if (call_index > def_index) {
9892 if (MONO_IS_CALL (ins))
9893 call_index = ins_index;
9903 if (G_UNLIKELY (cfg->verbose_level > 2))
9904 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
9905 var->flags |= MONO_INST_IS_DEAD;
9906 cfg->vreg_to_inst [var->dreg] = NULL;
9913 * Compress the varinfo and vars tables so the liveness computation is faster and
9914 * takes up less space.
9917 for (i = 0; i < cfg->num_varinfo; ++i) {
9918 MonoInst *var = cfg->varinfo [i];
9919 if (pos < i && cfg->locals_start == i)
9920 cfg->locals_start = pos;
9921 if (!(var->flags & MONO_INST_IS_DEAD)) {
9923 cfg->varinfo [pos] = cfg->varinfo [i];
9924 cfg->varinfo [pos]->inst_c0 = pos;
9925 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9926 cfg->vars [pos].idx = pos;
9927 #if SIZEOF_VOID_P == 4
9928 if (cfg->varinfo [pos]->type == STACK_I8) {
9929 /* Modify the two component vars too */
9932 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9933 var1->inst_c0 = pos;
9934 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9935 var1->inst_c0 = pos;
9942 cfg->num_varinfo = pos;
9943 if (cfg->locals_start > cfg->num_varinfo)
9944 cfg->locals_start = cfg->num_varinfo;
9948 * mono_spill_global_vars:
9950 * Generate spill code for variables which are not allocated to registers,
9951 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
9952 * code is generated which could be optimized by the local optimization passes.
9955 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/* Per-vreg cache: lvreg currently holding the variable's value (0 = none).
 * Only valid inside one bblock; cleared at bblock boundaries and after calls. */
9960 guint32 *vreg_to_lvreg;
9962 guint32 i, lvregs_len;
9963 gboolean dest_has_lvreg = FALSE;
9964 guint32 stacktypes [128];
9966 *need_local_opts = FALSE;
/* spec2 receives a rewritten ins spec when store opcodes temporarily swap
 * dreg/sreg2 below. */
9968 memset (spec2, 0, sizeof (spec2));
9970 /* FIXME: Move this function to mini.c */
/* Map the regtype characters used in ins specs to stack types. */
9971 stacktypes ['i'] = STACK_PTR;
9972 stacktypes ['l'] = STACK_I8;
9973 stacktypes ['f'] = STACK_R8;
9975 #if SIZEOF_VOID_P == 4
9976 /* Create MonoInsts for longs */
/* On 32 bit targets a long occupies two machine words: give each component
 * vreg (+1 low word, +2 high word) its own REGOFFSET description. */
9977 for (i = 0; i < cfg->num_varinfo; i++) {
9978 MonoInst *ins = cfg->varinfo [i];
9980 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
9981 switch (ins->type) {
9982 #ifdef MONO_ARCH_SOFT_FLOAT
9988 g_assert (ins->opcode == OP_REGOFFSET);
9990 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
9992 tree->opcode = OP_REGOFFSET;
9993 tree->inst_basereg = ins->inst_basereg;
9994 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
9996 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
9998 tree->opcode = OP_REGOFFSET;
9999 tree->inst_basereg = ins->inst_basereg;
10000 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10010 /* FIXME: widening and truncation */
10013 * As an optimization, when a variable allocated to the stack is first loaded into
10014 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10015 * the variable again.
10017 orig_next_vreg = cfg->next_vreg;
10018 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10019 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10022 /* Add spill loads/stores */
10023 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10026 if (cfg->verbose_level > 1)
10027 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10029 /* Clear vreg_to_lvreg array */
/* The lvreg cache is only valid within a single bblock. */
10030 for (i = 0; i < lvregs_len; i++)
10031 vreg_to_lvreg [lvregs [i]] = 0;
10035 MONO_BB_FOR_EACH_INS (bb, ins) {
10036 const char *spec = INS_INFO (ins->opcode);
10037 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10038 gboolean store, no_lvreg;
10040 if (G_UNLIKELY (cfg->verbose_level > 1))
10041 mono_print_ins (ins);
10043 if (ins->opcode == OP_NOP)
10047 * We handle LDADDR here as well, since it can only be decomposed
10048 * when variable addresses are known.
10050 if (ins->opcode == OP_LDADDR) {
10051 MonoInst *var = ins->inst_p0;
10053 if (var->opcode == OP_VTARG_ADDR) {
10054 /* Happens on SPARC/S390 where vtypes are passed by reference */
10055 MonoInst *vtaddr = var->inst_left;
10056 if (vtaddr->opcode == OP_REGVAR) {
10057 ins->opcode = OP_MOVE;
10058 ins->sreg1 = vtaddr->dreg;
10060 else if (var->inst_left->opcode == OP_REGOFFSET) {
10061 ins->opcode = OP_LOAD_MEMBASE;
10062 ins->inst_basereg = vtaddr->inst_basereg;
10063 ins->inst_offset = vtaddr->inst_offset;
10067 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack slot: basereg + offset. */
10069 ins->opcode = OP_ADD_IMM;
10070 ins->sreg1 = var->inst_basereg;
10071 ins->inst_imm = var->inst_offset;
10074 *need_local_opts = TRUE;
10075 spec = INS_INFO (ins->opcode);
10078 if (ins->opcode < MONO_CEE_LAST) {
10079 mono_print_ins (ins);
10080 g_assert_not_reached ();
10084 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10088 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg and sreg2 so the base register is processed as a source below;
 * swapped back after processing. */
10089 tmp_reg = ins->dreg;
10090 ins->dreg = ins->sreg2;
10091 ins->sreg2 = tmp_reg;
10094 spec2 [MONO_INST_DEST] = ' ';
10095 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10096 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10098 } else if (MONO_IS_STORE_MEMINDEX (ins))
10099 g_assert_not_reached ();
10104 if (G_UNLIKELY (cfg->verbose_level > 1))
10105 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* FIRST PASS: the destination register. If it names a global variable,
 * either rename it to the allocated hreg, or emit a spill store after
 * the instruction. */
10110 regtype = spec [MONO_INST_DEST];
10111 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10114 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10115 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10116 MonoInst *store_ins;
10119 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10121 if (var->opcode == OP_REGVAR) {
10122 ins->dreg = var->dreg;
10123 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10125 * Instead of emitting a load+store, use a _membase opcode.
10127 g_assert (var->opcode == OP_REGOFFSET);
10128 if (ins->opcode == OP_MOVE) {
10131 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10132 ins->inst_basereg = var->inst_basereg;
10133 ins->inst_offset = var->inst_offset;
10136 spec = INS_INFO (ins->opcode);
10140 g_assert (var->opcode == OP_REGOFFSET);
10142 prev_dreg = ins->dreg;
10144 /* Invalidate any previous lvreg for this vreg */
10145 vreg_to_lvreg [ins->dreg] = 0;
10149 #ifdef MONO_ARCH_SOFT_FLOAT
10150 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: R8 values are kept in integer register pairs. */
10152 store_opcode = OP_STOREI8_MEMBASE_REG;
10156 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10158 if (regtype == 'l') {
/* Longs need two word-sized stores on 32 bit targets. */
10159 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10160 mono_bblock_insert_after_ins (bb, ins, store_ins);
10161 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10162 mono_bblock_insert_after_ins (bb, ins, store_ins);
10165 g_assert (store_opcode != OP_STOREV_MEMBASE);
10167 /* Try to fuse the store into the instruction itself */
10168 /* FIXME: Add more instructions */
10169 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10170 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10171 ins->inst_imm = ins->inst_c0;
10172 ins->inst_destbasereg = var->inst_basereg;
10173 ins->inst_offset = var->inst_offset;
10174 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a stack variable becomes a direct store of the source. */
10175 ins->opcode = store_opcode;
10176 ins->inst_destbasereg = var->inst_basereg;
10177 ins->inst_offset = var->inst_offset;
10181 tmp_reg = ins->dreg;
10182 ins->dreg = ins->sreg2;
10183 ins->sreg2 = tmp_reg;
10186 spec2 [MONO_INST_DEST] = ' ';
10187 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10188 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10190 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10191 // FIXME: The backends expect the base reg to be in inst_basereg
10192 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10194 ins->inst_basereg = var->inst_basereg;
10195 ins->inst_offset = var->inst_offset;
10196 spec = INS_INFO (ins->opcode);
10198 /* printf ("INS: "); mono_print_ins (ins); */
10199 /* Create a store instruction */
10200 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10202 /* Insert it after the instruction */
10203 mono_bblock_insert_after_ins (bb, ins, store_ins);
10206 * We can't assign ins->dreg to var->dreg here, since the
10207 * sregs could use it. So set a flag, and do it after
10210 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10211 dest_has_lvreg = TRUE;
/* SECOND PASS: the two source registers, emitting spill loads before the
 * instruction where needed (or reusing a cached lvreg). */
10220 for (srcindex = 0; srcindex < 2; ++srcindex) {
10221 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10222 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10224 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10225 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10226 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10227 MonoInst *load_ins;
10228 guint32 load_opcode;
10230 if (var->opcode == OP_REGVAR) {
10232 ins->sreg1 = var->dreg;
10234 ins->sreg2 = var->dreg;
10238 g_assert (var->opcode == OP_REGOFFSET);
10240 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10242 g_assert (load_opcode != OP_LOADV_MEMBASE);
10244 if (vreg_to_lvreg [sreg]) {
10245 /* The variable is already loaded to an lvreg */
10246 if (G_UNLIKELY (cfg->verbose_level > 1))
10247 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10249 ins->sreg1 = vreg_to_lvreg [sreg];
10251 ins->sreg2 = vreg_to_lvreg [sreg];
10255 /* Try to fuse the load into the instruction */
10256 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10257 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10258 ins->inst_basereg = var->inst_basereg;
10259 ins->inst_offset = var->inst_offset;
10260 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10261 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10262 ins->sreg2 = var->inst_basereg;
10263 ins->inst_offset = var->inst_offset;
10265 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
/* Moving a variable into itself after renaming: drop the ins. */
10266 ins->opcode = OP_NOP;
10269 //printf ("%d ", srcindex); mono_print_ins (ins);
10271 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10273 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10274 if (var->dreg == prev_dreg) {
10276 * sreg refers to the value loaded by the load
10277 * emitted below, but we need to use ins->dreg
10278 * since it refers to the store emitted earlier.
/* Remember the lvreg so later uses in this bblock skip the reload. */
10282 vreg_to_lvreg [var->dreg] = sreg;
10283 g_assert (lvregs_len < 1024);
10284 lvregs [lvregs_len ++] = var->dreg;
10293 if (regtype == 'l') {
10294 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10295 mono_bblock_insert_before_ins (bb, ins, load_ins);
10296 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10297 mono_bblock_insert_before_ins (bb, ins, load_ins);
10300 #if SIZEOF_VOID_P == 4
10301 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10303 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10304 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg pass: now that the sregs are processed it is
 * safe to record the dreg's lvreg in the cache. */
10310 if (dest_has_lvreg) {
10311 vreg_to_lvreg [prev_dreg] = ins->dreg;
10312 g_assert (lvregs_len < 1024);
10313 lvregs [lvregs_len ++] = prev_dreg;
10314 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above. */
10318 tmp_reg = ins->dreg;
10319 ins->dreg = ins->sreg2;
10320 ins->sreg2 = tmp_reg;
10323 if (MONO_IS_CALL (ins)) {
10324 /* Clear vreg_to_lvreg array */
/* Calls clobber the lvregs, so the cache is invalid afterwards. */
10325 for (i = 0; i < lvregs_len; i++)
10326 vreg_to_lvreg [lvregs [i]] = 0;
10330 if (cfg->verbose_level > 1)
10331 mono_print_ins_index (1, ins);
10338 * - use 'iadd' instead of 'int_add'
10339 * - handling ovf opcodes: decompose in method_to_ir.
10340 * - unify iregs/fregs
10341 * -> partly done, the missing parts are:
10342 * - a more complete unification would involve unifying the hregs as well, so
10343 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10344 * would no longer map to the machine hregs, so the code generators would need to
10345 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10346 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10347 * fp/non-fp branches speeds it up by about 15%.
10348 * - use sext/zext opcodes instead of shifts
10350 * - get rid of TEMPLOADs if possible and use vregs instead
10351 * - clean up usage of OP_P/OP_ opcodes
10352 * - cleanup usage of DUMMY_USE
10353 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10355 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10356 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10357 * - make sure handle_stack_args () is called before the branch is emitted
10358 * - when the new IR is done, get rid of all unused stuff
10359 * - COMPARE/BEQ as separate instructions or unify them ?
10360 * - keeping them separate allows specialized compare instructions like
10361 * compare_imm, compare_membase
10362 * - most back ends unify fp compare+branch, fp compare+ceq
10363 * - integrate handle_stack_args into inline_method
10364 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10365 * - Things to backport to the old JIT:
10366 * - op_atomic_exchange fix for amd64
10367 * - localloc fix for amd64
10368 * - x86 type_token change
10370 * - long eq/ne optimizations
10371 * - handle long shift opts on 32 bit platforms somehow: they require
10372 * 3 sregs (2 for arg1 and 1 for arg2)
10373 * - make byref a 'normal' type.
10374 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10375 * variable if needed.
10376 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10377 * like inline_method.
10378 * - remove inlining restrictions
10379 * - remove mono_save_args.
10380 * - add 'introduce a new optimization to simplify some range checks'
10381 * - fix LNEG and enable cfold of INEG
10382 * - generalize x86 optimizations like ldelema as a peephole optimization
10383 * - add store_mem_imm for amd64
10384 * - optimize the loading of the interruption flag in the managed->native wrappers
10385 * - avoid special handling of OP_NOP in passes
10386 * - move code inserting instructions into one function/macro.
10387 * - cleanup the code replacement in decompose_long_opts ()
10388 * - try a coalescing phase after liveness analysis
10389 * - add float -> vreg conversion + local optimizations on !x86
10390 * - figure out how to handle decomposed branches during optimizations, ie.
10391 * compare+branch, op_jump_table+op_br etc.
10392 * - promote RuntimeXHandles to vregs
10393 * - vtype cleanups:
10394 * - add a NEW_VARLOADA_VREG macro
10395 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10396 * accessing vtype fields.
10397 * - get rid of I8CONST on 64 bit platforms
10398 * - dealing with the increase in code size due to branches created during opcode
10400 * - use extended basic blocks
10401 * - all parts of the JIT
10402 * - handle_global_vregs () && local regalloc
10403 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10404 * - sources of increase in code size:
10407 * - isinst and castclass
10408 * - lvregs not allocated to global registers even if used multiple times
10409 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10411 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10412 * - add all micro optimizations from the old JIT
10413 * - put tree optimizations into the deadce pass
10414 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10415 * specific function.
10416 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10417 * fcompare + branchCC.
10418 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10419 * running generics.exe.
10420 * - create a helper function for allocating a stack slot, taking into account
10421 * MONO_CFG_HAS_SPILLUP.
10422 * - merge new GC changes in mini.c.
10424 * - merge the ia64 switch changes.
10425 * - merge the mips conditional changes.
10426 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10427 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10428 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10429 * - optimize mono_regstate2_alloc_int/float.
10430 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10431 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10432 * parts of the tree could be separated by other instructions, killing the tree
10433 * arguments, or stores killing loads etc. Also, should we fold loads into other
10434 * instructions if the result of the load is used multiple times ?
10435 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10436 * - LAST MERGE: 108395.
10437 * - when returning vtypes in registers, generate IR and append it to the end of the
10438 * last bb instead of doing it in the epilog.
10439 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10440 * ones in inssel.h.
10441 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10449 - When to decompose opcodes:
10450 - earlier: this makes some optimizations hard to implement, since the low level IR
10451 no longer contains the necessary information. But it is easier to do.
10452 - later: harder to implement, enables more optimizations.
10453 - Branches inside bblocks:
10454 - created when decomposing complex opcodes.
10455 - branches to another bblock: harmless, but not tracked by the branch
10456 optimizations, so need to branch to a label at the start of the bblock.
10457 - branches to inside the same bblock: very problematic, trips up the local
10458 reg allocator. Can be fixed by splitting the current bblock, but that is a
10459 complex operation, since some local vregs can become global vregs etc.
10460 - Local/global vregs:
10461 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10462 local register allocator.
10463 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10464 structure, created by mono_create_var (). Assigned to hregs or the stack by
10465 the global register allocator.
10466 - When to do optimizations like alu->alu_imm:
10467 - earlier -> saves work later on since the IR will be smaller/simpler
10468 - later -> can work on more instructions
10469 - Handling of valuetypes:
10470 - When a vtype is pushed on the stack, a new temporary is created, an
10471 instruction computing its address (LDADDR) is emitted and pushed on
10472 the stack. Need to optimize cases when the vtype is used immediately as in
10473 argument passing, stloc etc.
10474 - Instead of the to_end stuff in the old JIT, simply call the function handling
10475 the values on the stack before emitting the last instruction of the bb.