2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
/* Tuning knobs for the inlining heuristic — presumably a branch's weight is
 * compared against the callee-size limit below when deciding whether to
 * inline. NOTE(review): the consuming heuristic is not visible in this
 * chunk; confirm against the inliner before relying on exact semantics. */
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > -1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whether 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-remapping helpers defined later in this
 * file (ldind/stind CIL opcodes to their *_MEMBASE load/store forms). */
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
/* Map an opcode to its immediate-operand variant; the _noemul form
 * presumably excludes opcodes that require software emulation —
 * TODO(review): confirm, the definitions are not visible in this chunk. */
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
/* IR-emission helpers shared with other parts of the JIT. */
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* Main entry point: convert a method's CIL into the JIT's internal
 * representation, emitting basic blocks between start_bblock and
 * end_bblock; used both for top-level compilation and for inlining
 * (return_var / inline_args / inline_offset). */
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
/* Shared MonoMethodSignature objects used when emitting calls to runtime
 * trampolines (class init, domain fetch, generic class init, RGCTX lazy
 * fetch). Presumably initialized once during JIT startup — the
 * initialization site is not visible in this chunk. */
131 MonoMethodSignature *helper_sig_class_init_trampoline;
132 MonoMethodSignature *helper_sig_domain_get;
133 MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
/* Table mapping JIT icall names to their descriptors; defined in another
 * translation unit (definition not visible in this chunk). */
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
241 type = &type->data.generic_class->container_class->byval_arg;
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
/* Abort processing of unverifiable IL: break into the debugger when the
 * break_on_unverified debug option is set, otherwise jump to the local
 * 'unverified' label (which every function using this macro must define). */
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
282 #define CHECK_BBLOCK(target,ip,tblock) do { \
283 if ((target) < (ip) && !(tblock)->code) { \
284 bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
285 if (cfg->verbose_level > 2) printf ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
289 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
290 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
291 int _length_reg = alloc_ireg (cfg); \
292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
293 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
294 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
298 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
299 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
300 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
303 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
304 ins->sreg1 = array_reg; \
305 ins->sreg2 = index_reg; \
306 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
307 MONO_ADD_INS ((cfg)->cbb, ins); \
308 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
309 (cfg)->cbb->has_array_access = TRUE; \
313 #if defined(__i386__) || defined(__x86_64__)
314 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
315 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
316 (dest)->dreg = alloc_preg ((cfg)); \
317 (dest)->sreg1 = (sr1); \
318 (dest)->sreg2 = (sr2); \
319 (dest)->inst_imm = (imm); \
320 (dest)->backend.shift_amount = (shift); \
321 MONO_ADD_INS ((cfg)->cbb, (dest)); \
325 #if SIZEOF_VOID_P == 8
326 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
327 /* FIXME: Need to add many more cases */ \
328 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
330 int dr = alloc_preg (cfg); \
331 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
332 (ins)->sreg2 = widen->dreg; \
336 #define ADD_WIDEN_OP(ins, arg1, arg2)
339 #define ADD_BINOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 ins->sreg2 = sp [1]->dreg; \
344 type_from_op (ins, sp [0], sp [1]); \
346 /* Have to insert a widening op */ \
347 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
348 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 mono_decompose_opcode ((cfg), (ins)); \
354 #define ADD_UNOP(op) do { \
355 MONO_INST_NEW (cfg, ins, (op)); \
357 ins->sreg1 = sp [0]->dreg; \
358 type_from_op (ins, sp [0], NULL); \
360 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
361 MONO_ADD_INS ((cfg)->cbb, (ins)); \
363 mono_decompose_opcode (cfg, ins); \
366 #define ADD_BINCOND(next_block) do { \
369 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
370 cmp->sreg1 = sp [0]->dreg; \
371 cmp->sreg2 = sp [1]->dreg; \
372 type_from_op (cmp, sp [0], sp [1]); \
374 type_from_op (ins, sp [0], sp [1]); \
375 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
376 GET_BBLOCK (cfg, tblock, target); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_true_bb = tblock; \
379 CHECK_BBLOCK (target, ip, tblock); \
380 if ((next_block)) { \
381 link_bblock (cfg, bblock, (next_block)); \
382 ins->inst_false_bb = (next_block); \
383 start_new_bblock = 1; \
385 GET_BBLOCK (cfg, tblock, ip); \
386 link_bblock (cfg, bblock, tblock); \
387 ins->inst_false_bb = tblock; \
388 start_new_bblock = 2; \
390 if (sp != stack_start) { \
391 handle_stack_args (cfg, stack_start, sp - stack_start); \
392 CHECK_UNVERIFIABLE (cfg); \
394 MONO_ADD_INS (bblock, cmp); \
395 MONO_ADD_INS (bblock, ins); \
399 * link_bblock: Links two basic blocks
401 * links two basic blocks in the control flow graph, the 'from'
402 * argument is the starting block and the 'to' argument is the block
403 * that control flow reaches after 'from'.
406 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
408 MonoBasicBlock **newa;
412 if (from->cil_code) {
414 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
416 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
419 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
421 printf ("edge from entry to exit\n");
426 for (i = 0; i < from->out_count; ++i) {
427 if (to == from->out_bb [i]) {
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
434 for (i = 0; i < from->out_count; ++i) {
435 newa [i] = from->out_bb [i];
443 for (i = 0; i < to->in_count; ++i) {
444 if (from == to->in_bb [i]) {
450 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
451 for (i = 0; i < to->in_count; ++i) {
452 newa [i] = to->in_bb [i];
461 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
463 link_bblock (cfg, from, to);
467 * mono_find_block_region:
469 * We mark each basic block with a region ID. We use that to avoid BB
470 * optimizations when blocks are in different regions.
473 * A region token that encodes where this region is, and information
474 * about the clause owner for this block.
476 * The region encodes the try/catch/filter clause that owns this block
477 * as well as the type. -1 is a special value that represents a block
478 * that is in none of try/catch/filter.
481 mono_find_block_region (MonoCompile *cfg, int offset)
483 MonoMethod *method = cfg->method;
484 MonoMethodHeader *header = mono_method_get_header (method);
485 MonoExceptionClause *clause;
488 /* first search for handlers and filters */
489 for (i = 0; i < header->num_clauses; ++i) {
490 clause = &header->clauses [i];
491 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
492 (offset < (clause->handler_offset)))
493 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
495 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
496 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
497 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
498 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
499 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
501 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
505 /* search the try blocks */
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
508 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
509 return ((i + 1) << 8) | clause->flags;
516 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
518 MonoMethod *method = cfg->method;
519 MonoMethodHeader *header = mono_method_get_header (method);
520 MonoExceptionClause *clause;
521 MonoBasicBlock *handler;
525 for (i = 0; i < header->num_clauses; ++i) {
526 clause = &header->clauses [i];
527 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
528 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
529 if (clause->flags == type) {
530 handler = cfg->cil_offset_to_bb [clause->handler_offset];
532 res = g_list_append (res, handler);
540 mono_create_spvar_for_region (MonoCompile *cfg, int region)
544 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
548 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
556 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
558 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
566 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
570 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
571 /* prevent it from being register allocated */
572 var->flags |= MONO_INST_INDIRECT;
574 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
579 static MonoBasicBlock*
580 find_previous (MonoBasicBlock **bblocks, guint32 n_bblocks, MonoBasicBlock *start, const guchar *code)
582 MonoBasicBlock *best = start;
585 for (i = 0; i < n_bblocks; ++i) {
587 MonoBasicBlock *bb = bblocks [i];
589 if (bb->cil_code && bb->cil_code < code && bb->cil_code > best->cil_code)
598 split_bblock (MonoCompile *cfg, MonoBasicBlock *first, MonoBasicBlock *second) {
607 * FIXME: take into account all the details:
608 * second may have been the target of more than one bblock
610 second->out_count = first->out_count;
611 second->out_bb = first->out_bb;
613 for (i = 0; i < first->out_count; ++i) {
614 bb = first->out_bb [i];
615 for (j = 0; j < bb->in_count; ++j) {
616 if (bb->in_bb [j] == first)
617 bb->in_bb [j] = second;
621 first->out_count = 0;
622 first->out_bb = NULL;
623 link_bblock (cfg, first, second);
625 second->last_ins = first->last_ins;
627 /*printf ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
628 for (inst = first->code; inst && inst->next; inst = inst->next) {
629 /*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
630 printf ("found %p: %s", inst->next->cil_code, code);
632 if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
633 second->code = inst->next;
635 first->last_ins = inst;
636 second->next_bb = first->next_bb;
637 first->next_bb = second;
642 g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
657 inst->type = STACK_MP;
658 inst->klass = mono_defaults.object_class;
662 inst->klass = klass = mono_class_from_mono_type (type);
665 switch (type->type) {
667 inst->type = STACK_INV;
671 case MONO_TYPE_BOOLEAN:
677 inst->type = STACK_I4;
682 case MONO_TYPE_FNPTR:
683 inst->type = STACK_PTR;
685 case MONO_TYPE_CLASS:
686 case MONO_TYPE_STRING:
687 case MONO_TYPE_OBJECT:
688 case MONO_TYPE_SZARRAY:
689 case MONO_TYPE_ARRAY:
690 inst->type = STACK_OBJ;
694 inst->type = STACK_I8;
698 inst->type = STACK_R8;
700 case MONO_TYPE_VALUETYPE:
701 if (type->data.klass->enumtype) {
702 type = type->data.klass->enum_basetype;
706 inst->type = STACK_VTYPE;
709 case MONO_TYPE_TYPEDBYREF:
710 inst->klass = mono_defaults.typed_reference_class;
711 inst->type = STACK_VTYPE;
713 case MONO_TYPE_GENERICINST:
714 type = &type->data.generic_class->container_class->byval_arg;
717 case MONO_TYPE_MVAR :
718 /* FIXME: all the arguments must be references for now,
719 * later look inside cfg and see if the arg num is
722 g_assert (cfg->generic_sharing_context);
723 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_MOVE;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1143 inline static MonoInst *
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1169 return cfg->rgctx_var;
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to montype not handled\n", ins->type);
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 switch (mono_type_get_underlying_type (t)->type) {
1195 case MONO_TYPE_BOOLEAN:
1198 case MONO_TYPE_CHAR:
1205 case MONO_TYPE_FNPTR:
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_VALUETYPE:
1220 case MONO_TYPE_TYPEDBYREF:
1222 case MONO_TYPE_GENERICINST:
1223 if (mono_type_generic_inst_is_valuetype (t))
1229 g_assert_not_reached ();
1236 array_access_to_klass (int opcode)
1240 return mono_defaults.byte_class;
1242 return mono_defaults.uint16_class;
1245 return mono_defaults.int_class;
1248 return mono_defaults.sbyte_class;
1251 return mono_defaults.int16_class;
1254 return mono_defaults.int32_class;
1256 return mono_defaults.uint32_class;
1259 return mono_defaults.int64_class;
1262 return mono_defaults.single_class;
1265 return mono_defaults.double_class;
1266 case CEE_LDELEM_REF:
1267 case CEE_STELEM_REF:
1268 return mono_defaults.object_class;
1270 g_assert_not_reached ();
1276 * We try to share variables when possible
1279 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1284 /* inlining can result in deeper stacks */
1285 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1286 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1288 pos = ins->type - 1 + slot * STACK_MAX;
1290 switch (ins->type) {
1297 if ((vnum = cfg->intvars [pos]))
1298 return cfg->varinfo [vnum];
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1300 cfg->intvars [pos] = res->inst_c0;
1303 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1309 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1311 if (cfg->compile_aot) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single join point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the join point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpouse, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
1400 locals = bb->out_stack;
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
1428 if (outb->in_stack != locals) {
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into INTF_REG the interface-offsets entry for KLASS.  The table
 * lives immediately before the vtable pointed to by VTABLE_REG.  Under
 * AOT the (adjusted) interface id is a runtime patch; in the JIT case it
 * is folded into a constant negative offset from the vtable.
 */
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: entry is at a fixed negative offset from the vtable base. */
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1465 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1466 * stored in "klass_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_class:
 *
 *   Test bit klass->interface_id of the class's interface_bitmap: load
 * byte (iid >> 3) and mask with (1 << (iid & 7)).  Under AOT the id is a
 * runtime patch, so the shift/mask sequence is computed with emitted
 * instructions instead of compile-time constants.
 */
1469 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1471 int ibitmap_reg = alloc_preg (cfg);
1472 int ibitmap_byte_reg = alloc_preg (cfg);
1474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1476 if (cfg->compile_aot) {
1477 int iid_reg = alloc_preg (cfg);
1478 int shifted_iid_reg = alloc_preg (cfg);
1479 int ibitmap_byte_address_reg = alloc_preg (cfg);
1480 int masked_iid_reg = alloc_preg (cfg);
1481 int iid_one_bit_reg = alloc_preg (cfg);
1482 int iid_bit_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) */
1484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1488 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1489 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1490 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: both the byte offset and the bit mask are constants. */
1492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1498 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1499 * stored in "vtable_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *
 *   Same bit test as mini_emit_load_intf_bit_reg_class (), but reading
 * the interface_bitmap field from a MonoVTable instead of a MonoClass.
 */
1502 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1504 int ibitmap_reg = alloc_preg (cfg);
1505 int ibitmap_byte_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1509 if (cfg->compile_aot) {
1510 int iid_reg = alloc_preg (cfg);
1511 int shifted_iid_reg = alloc_preg (cfg);
1512 int ibitmap_byte_address_reg = alloc_preg (cfg);
1513 int masked_iid_reg = alloc_preg (cfg);
1514 int iid_one_bit_reg = alloc_preg (cfg);
1515 int iid_bit_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) */
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1521 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1522 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1523 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: both the byte offset and the bit mask are constants. */
1525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1531 * Emit code which checks whenever the interface id of @klass is smaller than
1532 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 *   Compare MAX_IID_REG against klass->interface_id (AOT: patched in at
 * load time).  On failure, either branch to FALSE_TARGET or throw
 * InvalidCastException.
 * NOTE(review): the guard selecting between the branch and the throw is
 * not visible in this extract — presumably `if (false_target)`; confirm
 * against the full source.
 */
1535 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1536 MonoBasicBlock *false_target)
1538 if (cfg->compile_aot) {
1539 int iid_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1551 /* Same as above, but obtains max_iid from a vtable */
/*
 * mini_emit_max_iid_check_vtable:
 *
 *   Load max_interface_id from the MonoVTable in VTABLE_REG, then
 * delegate to mini_emit_max_iid_check ().
 */
1553 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1554 MonoBasicBlock *false_target)
1556 int max_iid_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1559 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1562 /* Same as above, but obtains max_iid from a klass */
/*
 * mini_emit_max_iid_check_class:
 *
 *   Load max_interface_id from the MonoClass in KLASS_REG, then
 * delegate to mini_emit_max_iid_check ().
 */
1564 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1565 MonoBasicBlock *false_target)
1567 int max_iid_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1570 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an "is instance" test: check whether the class in KLASS_REG has
 * KLASS among its supertypes, branching to TRUE_TARGET on success.  When
 * klass->idepth exceeds the default supertable size, the class's idepth
 * is range-checked first, branching to FALSE_TARGET when too shallow.
 * The test loads supertypes [klass->idepth - 1] and compares it with
 * KLASS (AOT: via a class constant; JIT: as an immediate).
 */
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int idepth_reg = alloc_preg (cfg);
1577 int stypes_reg = alloc_preg (cfg);
1578 int stype = alloc_preg (cfg);
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1587 if (cfg->compile_aot) {
1588 int const_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1590 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT case: compare against the MonoClass pointer as an immediate. */
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit a cast-to-interface check against a vtable: range-check the
 * interface id, then test the interface bitmap bit.  On a set bit,
 * control reaches TRUE_TARGET; otherwise an InvalidCastException is
 * thrown (the selection between branch and throw is not fully visible
 * in this extract).
 */
1598 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1600 int intf_reg = alloc_preg (cfg);
1602 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1603 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1608 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1612 * Variant of the above that takes a register to the class, not the vtable.
/*
 * mini_emit_iface_class_cast:
 *
 *   Same interface cast check as mini_emit_iface_cast (), but KLASS_REG
 * holds a MonoClass* rather than a MonoVTable*.
 */
1615 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1617 int intf_bit_reg = alloc_preg (cfg);
1619 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1620 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1625 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 *   Emit code which throws InvalidCastException unless the MonoClass in
 * KLASS_REG is exactly KLASS (AOT: via class constant; JIT: pointer
 * compared as an immediate).
 */
1629 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1631 if (cfg->compile_aot) {
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the MonoClass in KLASS_REG with KLASS and branch to TARGET
 * with BRANCH_OP on the comparison result (caller chooses the condition,
 * e.g. OP_PBEQ / OP_PBNE_UN).
 */
1642 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1644 if (cfg->compile_aot) {
1645 int const_reg = alloc_preg (cfg);
1646 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 *   Emit a full castclass check for the object in OBJ_REG whose class is
 * in KLASS_REG, throwing InvalidCastException on failure.  Array casts
 * compare rank and then recurse on the element (cast) class, with
 * special cases for object/enum element types; SZARRAY targets also
 * verify the object is a vector (bounds == NULL).  Non-array casts use
 * the supertypes table like mini_emit_isninst_cast (), but throw instead
 * of branching.
 * NOTE(review): the array/non-array dispatch (likely `if (klass->rank)`)
 * and several braces are missing from this extract.
 */
1655 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1658 int rank_reg = alloc_preg (cfg);
1659 int eclass_reg = alloc_preg (cfg);
/* Ranks must match exactly. */
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1663 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1664 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1666 if (klass->cast_class == mono_defaults.object_class) {
1667 int parent_reg = alloc_preg (cfg);
/* Casting to object[]: accept any non-enum reference element type. */
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1669 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1670 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1671 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1672 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1673 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1674 } else if (klass->cast_class == mono_defaults.enum_class) {
1675 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1676 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1677 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* General element type: recurse on the element class. */
1679 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1682 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1683 /* Check that the object is a vector too */
1684 int bounds_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1687 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table. */
1690 int idepth_reg = alloc_preg (cfg);
1691 int stypes_reg = alloc_preg (cfg);
1692 int stype = alloc_preg (cfg);
1694 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1697 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1701 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 *   Emit inline code which sets SIZE bytes at DESTREG+OFFSET to VAL
 * (only VAL == 0 is supported, see the assert).  Small aligned sizes use
 * a single store-immediate; larger sizes store a zero register in
 * pointer-sized chunks, falling back to 4/2/1-byte tails.
 * NOTE(review): the loops/size bookkeeping between the stores are
 * missing from this extract.
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zeroing is implemented. */
1710 g_assert (val == 0);
/* Small, sufficiently aligned block: a single store-immediate. */
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_VOID_P == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the value in a register once. */
1731 val_reg = alloc_preg (cfg);
1733 if (sizeof (gpointer) == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: byte stores until aligned. */
1739 /* This could be optimized further if neccesary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (sizeof (gpointer) == 8) {
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* 4/2/1-byte tail stores for the remaining bytes. */
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy2:
 *
 *   Emit inline code copying SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET.  Copies in pointer-sized chunks when alignment and
 * the platform allow unaligned access, with 4/2/1-byte tail copies;
 * each chunk is staged through a freshly allocated register.
 * NOTE(review): the loops/offset bookkeeping between the load/store
 * pairs are missing from this extract.
 */
1781 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned prefix: byte-by-byte copies. */
1786 /* This could be optimized further if neccesary */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1797 #if !NO_UNALIGNED_ACCESS
1798 if (sizeof (gpointer) == 8) {
/* 8-byte chunks on 64-bit targets. */
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks. */
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte tail. */
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* 1-byte tail. */
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mini_emit_check_array_type:
 *
 *   Emit code which throws ArrayTypeMismatchException unless OBJ is an
 * array of exactly ARRAY_CLASS.  With MONO_OPT_SHARED the object's
 * MonoClass is compared (vtables are per-domain); otherwise the vtable
 * pointer itself is compared, via mono_class_vtable () in the JIT case
 * or a vtable constant patch under AOT.
 */
1837 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1839 int vtable_reg = alloc_preg (cfg);
1841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1843 if (cfg->opt & MONO_OPT_SHARED) {
1844 int class_reg = alloc_preg (cfg);
1845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1846 if (cfg->compile_aot) {
1847 int klass_reg = alloc_preg (cfg);
1848 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1849 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1854 if (cfg->compile_aot) {
1855 int vt_reg = alloc_preg (cfg);
1856 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1857 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method's return TYPE to the IR call opcode family:
 * VOIDCALL/CALL/LCALL/FCALL/VCALL, further selected by CALLI
 * (indirect, _REG variants) or VIRT (_VIRT variants).  Enums and
 * generic instances are unwrapped and re-dispatched; byref returns use
 * the plain CALL family.
 * NOTE(review): the `handle_enum:` label and some case labels are
 * missing from this extract.
 */
1867 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1870 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 type = mini_get_basic_type_from_generic (gsctx, type);
1874 switch (type->type) {
1875 case MONO_TYPE_VOID:
1876 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1879 case MONO_TYPE_BOOLEAN:
1882 case MONO_TYPE_CHAR:
1885 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1889 case MONO_TYPE_FNPTR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1902 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1903 case MONO_TYPE_VALUETYPE:
/* Enums dispatch again on their underlying type. */
1904 if (type->data.klass->enumtype) {
1905 type = type->data.klass->enum_basetype;
1908 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1909 case MONO_TYPE_TYPEDBYREF:
1910 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1911 case MONO_TYPE_GENERICINST:
1912 type = &type->data.generic_class->container_class->byval_arg;
1915 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1921 * target_type_is_incompatible:
1922 * @cfg: MonoCompile context
1924 * Check that the item @arg on the evaluation stack can be stored
1925 * in the target type (can be a local, or field, etc).
1926 * The cfg arg can be used to check if we need verification or just
1929 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): case labels, `return 1;` lines and `break;`s are missing
 * from this extract; only the visible checks are annotated. */
1932 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1934 MonoType *simple_type;
1937 if (target->byref) {
1938 /* FIXME: check that the pointed to types match */
1939 if (arg->type == STACK_MP)
1940 return arg->klass != mono_class_from_mono_type (target);
1941 if (arg->type == STACK_PTR)
/* Enums are reduced to their underlying type before the switch. */
1946 simple_type = mono_type_get_underlying_type (target);
1947 switch (simple_type->type) {
1948 case MONO_TYPE_VOID:
1952 case MONO_TYPE_BOOLEAN:
1955 case MONO_TYPE_CHAR:
1958 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1962 /* STACK_MP is needed when setting pinned locals */
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1968 case MONO_TYPE_FNPTR:
1969 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 if (arg->type != STACK_OBJ)
1979 /* FIXME: check type compatibility */
1983 if (arg->type != STACK_I8)
1988 if (arg->type != STACK_R8)
1991 case MONO_TYPE_VALUETYPE:
/* Value types must match class exactly, not just stack type. */
1992 if (arg->type != STACK_VTYPE)
1994 klass = mono_class_from_mono_type (simple_type);
1995 if (klass != arg->klass)
1998 case MONO_TYPE_TYPEDBYREF:
1999 if (arg->type != STACK_VTYPE)
2001 klass = mono_class_from_mono_type (simple_type);
2002 if (klass != arg->klass)
2005 case MONO_TYPE_GENERICINST:
2006 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2007 if (arg->type != STACK_VTYPE)
2009 klass = mono_class_from_mono_type (simple_type);
2010 if (klass != arg->klass)
2014 if (arg->type != STACK_OBJ)
2016 /* FIXME: check type compatibility */
2020 case MONO_TYPE_MVAR:
2021 /* FIXME: all the arguments must be references for now,
2022 * later look inside cfg and see if the arg num is
2023 * really a reference
2025 g_assert (cfg->generic_sharing_context);
2026 if (arg->type != STACK_OBJ)
2030 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2036 * Prepare arguments for passing to a function call.
2037 * Return a non-zero value if the arguments can't be passed to the given
2039 * The type checks are not yet complete and some conversions may need
2040 * casts on 32 or 64 bit architectures.
2042 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): case labels, `return 1;` lines and `break;`s are missing
 * from this extract. */
2045 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2047 MonoType *simple_type;
/* `this` (args [0]) must be an object, managed pointer or native ptr. */
2051 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2055 for (i = 0; i < sig->param_count; ++i) {
2056 if (sig->params [i]->byref) {
2057 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2061 simple_type = sig->params [i];
2062 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2064 switch (simple_type->type) {
2065 case MONO_TYPE_VOID:
2070 case MONO_TYPE_BOOLEAN:
2073 case MONO_TYPE_CHAR:
2076 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2082 case MONO_TYPE_FNPTR:
2083 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2086 case MONO_TYPE_CLASS:
2087 case MONO_TYPE_STRING:
2088 case MONO_TYPE_OBJECT:
2089 case MONO_TYPE_SZARRAY:
2090 case MONO_TYPE_ARRAY:
2091 if (args [i]->type != STACK_OBJ)
2096 if (args [i]->type != STACK_I8)
2101 if (args [i]->type != STACK_R8)
2104 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on the underlying type. */
2105 if (simple_type->data.klass->enumtype) {
2106 simple_type = simple_type->data.klass->enum_basetype;
2109 if (args [i]->type != STACK_VTYPE)
2112 case MONO_TYPE_TYPEDBYREF:
2113 if (args [i]->type != STACK_VTYPE)
2116 case MONO_TYPE_GENERICINST:
2117 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2121 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to its direct OP_*CALL counterpart.
 * Asserts on any opcode outside the CALLVIRT family.
 * NOTE(review): most case/return pairs are missing from this extract.
 */
2129 callvirt_to_call (int opcode)
2134 case OP_VOIDCALLVIRT:
2143 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE
 * opcode (call through a [basereg + offset] slot, used for vtable and
 * delegate dispatch).  Asserts on opcodes outside the CALLVIRT family.
 */
2150 callvirt_to_call_membase (int opcode)
2154 return OP_CALL_MEMBASE;
2155 case OP_VOIDCALLVIRT:
2156 return OP_VOIDCALL_MEMBASE;
2158 return OP_FCALL_MEMBASE;
2160 return OP_LCALL_MEMBASE;
2162 return OP_VCALL_MEMBASE;
2164 g_assert_not_reached ();
2170 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit code placing the IMT "method" argument for an interface call:
 * either the method constant (AOT patch or OP_PCONST) or, when IMT_ARG
 * is given, a copy of its vreg.  On architectures with a dedicated IMT
 * register the value is attached to CALL as an out-arg in that register;
 * otherwise the arch back end emits it.
 */
2172 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2174 #ifdef MONO_ARCH_IMT_REG
2175 int method_reg = alloc_preg (cfg);
2177 if (cfg->compile_aot) {
2178 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2179 } else if (imt_arg) {
2180 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2183 MONO_INST_NEW (cfg, ins, OP_PCONST);
2184 ins->inst_p0 = call->method;
2185 ins->dreg = method_reg;
2186 MONO_ADD_INS (cfg->cbb, ins);
2189 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this architecture: let the back end handle it. */
2191 mono_arch_emit_imt_argument (cfg, call);
2196 inline static MonoInst*
2197 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS and set it up: pick the call
 * opcode from the return type, allocate the destination vreg, arrange
 * valuetype return handling via OP_OUTARG_VTRETADDR, perform the
 * soft-float r8->r4 argument conversion where needed, and let the arch
 * back end lay out the out-args.  The instruction is NOT added to a
 * bblock here — callers do that.
 */
2199 inline static MonoCallInst *
2200 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2201 MonoInst **args, int calli, int virtual)
2204 #ifdef MONO_ARCH_SOFT_FLOAT
2208 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2211 call->signature = sig;
2213 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2215 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return: allocate a temp to receive the value. */
2216 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2219 temp->backend.is_pinvoke = sig->pinvoke;
2222 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2223 * address of return value to increase optimization opportunities.
2224 * Before vtype decomposition, the dreg of the call ins itself represents the
2225 * fact the call modifies the return value. After decomposition, the call will
2226 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2227 * will be transformed into an LDADDR.
2229 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2230 loada->dreg = alloc_preg (cfg);
2231 loada->inst_p0 = temp;
2232 /* We reference the call too since call->dreg could change during optimization */
2233 loada->inst_p1 = call;
2234 MONO_ADD_INS (cfg->cbb, loada);
2236 call->inst.dreg = temp->dreg;
2238 call->vret_var = loada;
2239 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2240 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2242 #ifdef MONO_ARCH_SOFT_FLOAT
2244 * If the call has a float argument, we would need to do an r8->r4 conversion using
2245 * an icall, but that cannot be done during the call sequence since it would clobber
2246 * the call registers + the stack. So we do it before emitting the call.
2248 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2250 MonoInst *in = call->args [i];
2252 if (i >= sig->hasthis)
2253 t = sig->params [i - sig->hasthis];
2255 t = &mono_defaults.int_class->byval_arg;
2256 t = mono_type_get_underlying_type (t);
2258 if (!t->byref && t->type == MONO_TYPE_R4) {
2259 MonoInst *iargs [1];
2263 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2265 /* The result will be in an int vreg */
2266 call->args [i] = conv;
2271 mono_arch_emit_call (cfg, call);
/* Track the max out-arg area and mark the method as making calls. */
2273 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2274 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and ARGS.
 * Builds the call with mono_emit_call_args (calli=TRUE), wires the
 * target address into sreg1, and appends it to the current bblock.
 */
2279 inline static MonoInst*
2280 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2282 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2284 call->inst.sreg1 = addr->dreg;
2286 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2288 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli (), but also passes RGCTX_ARG (the runtime
 * generic context) in the architecture's dedicated RGCTX register as an
 * out-arg of the call.  Only available when MONO_ARCH_RGCTX_REG is
 * defined; otherwise unreachable.
 */
2291 inline static MonoInst*
2292 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2294 #ifdef MONO_ARCH_RGCTX_REG
2296 int rgctx_reg = mono_alloc_preg (cfg);
2298 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2299 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2300 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2301 return (MonoInst*)call;
2303 g_assert_not_reached ();
/*
 * mono_emit_imt_method_call:
 *
 *   Emit a (possibly virtual) call to METHOD.  Handles: string ctor
 * signature fixup, remoting wrappers for MarshalByRef/object targets,
 * devirtualization of non-virtual and final/sealed methods (with a
 * this-null check), direct delegate Invoke dispatch through
 * invoke_impl, IMT-based interface dispatch, and ordinary vtable-slot
 * dispatch.  Returns the call instruction, already added to cfg->cbb.
 * NOTE(review): several closing braces, `else` lines and declarations
 * are missing from this extract; comments follow the visible code only.
 */
2309 mono_emit_imt_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2310 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2312 gboolean virtual = this != NULL;
2313 gboolean enable_for_aot = TRUE;
2316 if (method->string_ctor) {
2317 /* Create the real signature */
2318 /* FIXME: Cache these */
2319 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup (sig);
2320 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2325 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Transparent-proxy-safe calls on MBR/object targets go through the
 * remoting wrapper. */
2327 if (this && sig->hasthis &&
2328 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2329 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2330 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2332 call->method = method;
2334 call->inst.flags |= MONO_INST_HAS_METHOD;
2335 call->inst.inst_left = this;
2338 int vtable_reg, slot_reg, this_reg;
2340 this_reg = this->dreg;
/* Devirtualize non-virtual (or final, non-remoting-wrapper) methods. */
2342 if ((!cfg->compile_aot || enable_for_aot) &&
2343 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2344 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2345 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2347 * the method is not virtual, we just need to ensure this is not null
2348 * and then we can call the method directly.
2350 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2351 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2354 if (!method->string_ctor) {
2355 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2356 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2357 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2360 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2362 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2364 return (MonoInst*)call;
2367 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2368 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2369 /* Make a call to delegate->invoke_impl */
2370 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2371 call->inst.inst_basereg = this_reg;
2372 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2373 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2375 return (MonoInst*)call;
/* Virtual but statically dispatchable: final method or sealed class. */
2379 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2380 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2381 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2383 * the method is virtual, but we can statically dispatch since either
2384 * it's class or the method itself are sealed.
2385 * But first we need to ensure it's not a null reference.
2387 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2388 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2389 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2391 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2392 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2394 return (MonoInst*)call;
/* True virtual dispatch through the vtable (or IMT for interfaces). */
2397 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2399 /* Initialize method->slot */
2400 mono_class_setup_vtable (method->klass);
2402 vtable_reg = alloc_preg (cfg);
2403 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2404 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2406 #ifdef MONO_ARCH_HAVE_IMT
/* IMT: slot at a negative offset from the vtable. */
2408 guint32 imt_slot = mono_method_get_imt_slot (method);
2409 emit_imt_argument (cfg, call, imt_arg);
2410 slot_reg = vtable_reg;
2411 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT interface dispatch via the interface-offsets table. */
2414 if (slot_reg == -1) {
2415 slot_reg = alloc_preg (cfg);
2416 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2417 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
/* Class method: fixed vtable slot. */
2420 slot_reg = vtable_reg;
2421 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2424 call->inst.sreg1 = slot_reg;
2425 call->virtual = TRUE;
2428 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2430 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_imt_method_call () with no
 * explicit IMT argument.
 */
2433 static inline MonoInst*
2434 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2435 MonoInst **args, MonoInst *this)
2437 return mono_emit_imt_method_call (cfg, method, sig, args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct (non-virtual) call to the native function FUNC with
 *   signature SIG, appending it to the current basic block and returning
 *   the call instruction.
 */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for the address FUNC; the call
 *   goes through the icall's wrapper rather than FUNC directly.
 */
2456 inline static MonoInst*
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the corlib-internal String::memcpy(3-arg)
 *   helper used to copy value types; aborts if the installed corlib is too
 *   old to provide it.
 */
2467 get_memcpy_method (void)
2469 static MonoMethod *memcpy_method = NULL;
2470 if (!memcpy_method) {
2471 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2473 g_error ("Old corlib found. Install a new one");
2475 return memcpy_method;
2479 * Emit code to copy a valuetype of type @klass whose address is stored in
2480 * @src->dreg to memory whose address is stored at @dest->dreg.
2483 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2485 MonoInst *iargs [3];
2488 MonoMethod *memcpy_method;
2492 * This check breaks with spilled vars... need to handle it during verification anyway.
2493 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled (native) layout size, otherwise the managed size. */
2497 n = mono_class_native_size (klass, &align);
2499 n = mono_class_value_size (klass, &align);
/* Small types: expand to an inline memcpy instead of calling the corlib helper. */
2501 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2502 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2503 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fall back to calling String::memcpy (dest, src, n). */
2507 EMIT_NEW_ICONST (cfg, iargs [2], n);
2509 memcpy_method = get_memcpy_method ();
2510 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the corlib-internal String::memset(3-arg)
 *   helper used to zero value types; aborts if corlib is too old.
 */
2515 get_memset_method (void)
2517 static MonoMethod *memset_method = NULL;
2518 if (!memset_method) {
2519 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2521 g_error ("Old corlib found. Install a new one");
2523 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of KLASS at the address in
 *   DEST->dreg: inline memset for small types, otherwise a call to the
 *   corlib memset helper with (dest, 0, size).
 */
2527 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2529 MonoInst *iargs [3];
2532 MonoMethod *memset_method;
2534 /* FIXME: Optimize this for the case when dest is an LDADDR */
2536 mono_class_init (klass);
2537 n = mono_class_value_size (klass, &align);
2539 if (n <= sizeof (gpointer) * 5) {
2540 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2543 memset_method = get_memset_method ();
2545 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2546 EMIT_NEW_ICONST (cfg, iargs [2], n);
2547 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR producing the runtime generic context for METHOD.
 *   Three cases:
 *   - the method's own generic inst is used: load the MRGCTX variable;
 *   - static method: load the vtable variable (and, when the method is
 *     inflated with a method inst, dereference the MRGCTX's class_vtable);
 *   - otherwise: load the vtable from the THIS argument.
 *   Valuetype methods are not supported (asserted).
 */
2552 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used, MonoInst *this)
2554 g_assert (!method->klass->valuetype);
2556 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2557 MonoInst *mrgctx_loc, *mrgctx_var;
2560 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2562 mrgctx_loc = mono_get_vtable_var (cfg);
2563 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2566 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2567 MonoInst *vtable_loc, *vtable_var;
2571 vtable_loc = mono_get_vtable_var (cfg);
2572 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2574 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; fetch the class vtable from it. */
2575 MonoInst *mrgctx_var = vtable_var;
2578 vtable_reg = alloc_preg (cfg);
2579 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2580 vtable_var->type = STACK_PTR;
/* Instance method: the context is the vtable of the receiver. */
2586 int vtable_reg, res_reg;
2588 vtable_reg = alloc_preg (cfg);
2589 res_reg = alloc_preg (cfg);
2590 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * EMIT_GET_RGCTX:
 *   Convenience macro: load the 'this' argument when the current method is
 *   an instance method whose method inst is not used, then store the runtime
 *   generic context for the current method into (rgctx). Fails generic
 *   sharing for valuetype methods.
 */
2595 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2596 MonoInst *this = NULL; \
2597 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2598 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !((context_used) & MONO_GENERIC_CONTEXT_USED_METHOD)) \
2599 EMIT_NEW_ARGLOAD (cfg, this, 0); \
2600 (rgctx) = emit_get_rgctx (cfg, method, (context_used), this); \
/*
 * emit_get_rgctx_other_table_ptr:
 *   Emit a call to the lazy-fetch trampoline specialized for SLOT, passing
 *   the rgctx pointer; the trampoline returns the slot's content.
 */
2604 emit_get_rgctx_other_table_ptr (MonoCompile *cfg, MonoInst *rgc_ptr, int slot)
2606 MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
2607 guint8 *tramp = mini_create_rgctx_lazy_fetch_trampoline (slot);
2609 return mono_emit_native_call (cfg, tramp, sig, &rgc_ptr);
/*
 * emit_get_rgctx_klass:
 *   Register KLASS's type under RGCTX_TYPE for the current method and emit
 *   a lazy fetch of the resulting rgctx slot.
 */
2613 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2614 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2616 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2617 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, &klass->byval_arg, rgctx_type, cfg->generic_context);
2619 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method:
 *   Register CMETHOD under RGCTX_TYPE for the current method and emit a
 *   lazy fetch of the resulting rgctx slot.
 */
2623 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2624 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2626 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2627 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, cmethod, rgctx_type, cfg->generic_context);
2629 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_field:
 *   Register FIELD under RGCTX_TYPE for the current method and emit a lazy
 *   fetch of the resulting rgctx slot.
 */
2633 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2634 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2636 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2637 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, field, rgctx_type, cfg->generic_context);
2639 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method_rgctx:
 *   Register RGCTX_METHOD's own MRGCTX (MONO_RGCTX_INFO_METHOD_RGCTX) and
 *   emit a lazy fetch of the resulting rgctx slot.
 */
2643 emit_get_rgctx_method_rgctx (MonoCompile *cfg, int context_used,
2644 MonoInst *rgctx, MonoMethod *rgctx_method)
2646 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2647 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, rgctx_method,
2648 MONO_RGCTX_INFO_METHOD_RGCTX, cfg->generic_context);
2650 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
2654 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
/* (NOTE(review): in the shared case the Nullable.Unbox method is reached
 * indirectly through its rgctx slot and called via calli — confirm against
 * the elided conditional.) Disables AOT since a method ref to the
 * instantiated Unbox cannot be encoded. */
2658 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
2660 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2661 // Can't encode method ref
2662 cfg->disable_aot = TRUE;
2665 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2666 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2668 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2670 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
/*
 * handle_unbox:
 *   Emit the 'unbox' expansion for a non-array KLASS: verify at runtime
 *   that the boxed object's element class matches KLASS (throwing
 *   InvalidCastException otherwise) and produce the address of the payload,
 *   i.e. obj + sizeof (MonoObject). With shared generic code the expected
 *   element class is fetched from the rgctx instead of being a constant.
 */
2675 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2679 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2680 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2681 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2682 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2684 obj_reg = sp [0]->dreg;
2685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2688 /* FIXME: generics */
2689 g_assert (klass->rank == 0);
/* Boxed arrays can never unbox to a valuetype: require rank == 0. */
2692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2693 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
2699 MonoInst *element_class;
2701 /* This assertion is from the unboxcast insn */
2702 g_assert (klass->rank == 0);
2704 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2705 klass->element_class, MONO_RGCTX_INFO_KLASS);
2707 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2710 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* Result: address of the unboxed value inside the object. */
2713 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2714 MONO_ADD_INS (cfg->cbb, add);
2715 add->type = STACK_MP;
/*
 * handle_alloc:
 *   Emit IR allocating a new (uninitialized) object of KLASS. FOR_BOX
 *   selects the boxing-specific managed allocator where one exists.
 *   The allocator is chosen in order: mono_object_new (shared code, passes
 *   the domain), a corlib-specialized helper (AOT out-of-line code, avoids
 *   relocations), a GC managed allocator method, or the function returned
 *   by mono_class_get_allocation_ftn () (optionally taking the object size
 *   in words when pass_lw is set).
 */
2722 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2724 MonoInst *iargs [2];
2727 if (cfg->opt & MONO_OPT_SHARED) {
2728 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2729 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2731 alloc_ftn = mono_object_new;
2732 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2733 /* This happens often in argument checking code, eg. throw new FooException... */
2734 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2735 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2736 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2738 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2739 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2742 if (managed_alloc) {
2743 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2744 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, NULL);
2746 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: first argument is the instance size in pointer words. */
2748 guint32 lw = vtable->klass->instance_size;
2749 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2750 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2751 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2754 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2758 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *   Like handle_alloc (), but for shared generic code where KLASS is open:
 *   the runtime allocation information (domain/vtable) comes from DATA_INST
 *   computed at run time, so no managed allocator can be selected at
 *   compile time (see the FIXME below). Not supported under AOT.
 */
2762 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2765 MonoInst *iargs [2];
2766 MonoMethod *managed_alloc = NULL;
2770 FIXME: we cannot get managed_alloc here because we can't get
2771 the class's vtable (because it's not a closed class)
2773 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2774 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2777 if (cfg->opt & MONO_OPT_SHARED) {
2778 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2779 iargs [1] = data_inst;
2780 alloc_ftn = mono_object_new;
2782 g_assert (!cfg->compile_aot);
2784 if (managed_alloc) {
2785 iargs [0] = data_inst;
2786 return mono_emit_method_call (cfg, managed_alloc,
2787 mono_method_signature (managed_alloc), iargs, NULL);
2790 iargs [0] = data_inst;
2791 alloc_ftn = mono_object_new_specific;
2794 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *   Emit the 'box' expansion: allocate a boxed object for KLASS and store
 *   VAL into its payload (offset sizeof (MonoObject)). Nullable<T> is boxed
 *   by calling its Box method instead (which disables AOT, since the method
 *   ref cannot be encoded).
 */
2798 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2800 MonoInst *alloc, *ins;
2802 if (mono_class_is_nullable (klass)) {
2803 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2804 // Can't encode method ref
2805 cfg->disable_aot = TRUE;
2806 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
2809 alloc = handle_alloc (cfg, klass, TRUE);
2811 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *   Shared-generic-code variant of handle_box (): the allocation data comes
 *   from DATA_INST at run time. Nullable<T> is explicitly not supported.
 */
2817 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, MonoInst *data_inst)
2819 MonoInst *alloc, *ins;
2821 g_assert (!mono_class_is_nullable (klass));
2823 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2825 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *   Emit the 'castclass' expansion: check SRC against KLASS, throwing
 *   InvalidCastException on mismatch. Null references branch straight to
 *   the exit block. Interfaces use the interface-cast helper; sealed
 *   non-array classes are checked with a single vtable/class pointer
 *   compare; everything else goes through mini_emit_castclass ().
 *   With --debug=casts, the classes involved are stashed in JIT TLS so the
 *   exception message can include cast details, and reset on success.
 */
2831 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2833 MonoBasicBlock *is_null_bb;
2834 int obj_reg = src->dreg;
2835 int vtable_reg = alloc_preg (cfg);
2837 NEW_BBLOCK (cfg, is_null_bb);
2839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Record the source and destination classes in TLS for better cast errors. */
2842 if (mini_get_debug_options ()->better_cast_details) {
2843 int to_klass_reg = alloc_preg (cfg);
2844 int klass_reg = alloc_preg (cfg);
2845 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2848 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2852 MONO_ADD_INS (cfg->cbb, tls_get);
2853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2857 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2861 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2862 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2863 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2865 int klass_reg = alloc_preg (cfg);
2867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array class: a single pointer compare suffices. */
2869 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2870 /* the remoting code is broken, access the class for now */
2872 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2873 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2875 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2878 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2881 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2885 MONO_START_BB (cfg, is_null_bb);
2887 /* Reset the variables holding the cast details */
2888 if (mini_get_debug_options ()->better_cast_details) {
2889 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2891 MONO_ADD_INS (cfg->cbb, tls_get);
2892 /* It is enough to reset the from field */
2893 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 *   Emit the 'isinst' expansion: test SRC against KLASS and produce the
 *   object reference on success, 0 on failure. Null passes through to the
 *   success exit (copying the null into the result). Interfaces, arrays
 *   (with special-cased element classes), Nullable<T> and sealed classes
 *   each get dedicated fast paths; the general case goes through
 *   mini_emit_isninst_cast ().
 */
2900 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2903 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2904 int obj_reg = src->dreg;
2905 int vtable_reg = alloc_preg (cfg);
2906 int res_reg = alloc_preg (cfg);
2908 NEW_BBLOCK (cfg, is_null_bb);
2909 NEW_BBLOCK (cfg, false_bb);
2910 NEW_BBLOCK (cfg, end_bb);
2912 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2913 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2915 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2917 /* the is_null_bb target simply copies the input register to the output */
2918 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2920 int klass_reg = alloc_preg (cfg);
2922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: match rank, then compare element (cast) classes. */
2925 int rank_reg = alloc_preg (cfg);
2926 int eclass_reg = alloc_preg (cfg);
2928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2929 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2932 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2933 if (klass->cast_class == mono_defaults.object_class) {
2934 int parent_reg = alloc_preg (cfg);
2935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2936 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2937 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2939 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2940 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2941 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2943 } else if (klass->cast_class == mono_defaults.enum_class) {
2944 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2945 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2946 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2947 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2949 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2950 /* Check that the object is a vector too */
2951 int bounds_reg = alloc_preg (cfg);
2952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2957 /* the is_null_bb target simply copies the input register to the output */
2958 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2960 } else if (mono_class_is_nullable (klass)) {
2961 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2962 /* the is_null_bb target simply copies the input register to the output */
2963 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2965 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2966 /* the remoting code is broken, access the class for now */
2968 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2972 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2974 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2975 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2977 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2978 /* the is_null_bb target simply copies the input register to the output */
2979 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure: result is 0. */
2984 MONO_START_BB (cfg, false_bb);
2986 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Success (or null input): result is the object reference itself. */
2989 MONO_START_BB (cfg, is_null_bb);
2991 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2992 ins->type = STACK_OBJ;
2995 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Expansion of the remoting-aware isinst opcode (see the comment below
 *   for the 0/1/2 result encoding). Transparent proxies whose remote type
 *   cannot be checked locally yield 2 so the caller can fall back to a
 *   runtime check.
 */
3001 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3003 /* This opcode takes as input an object reference and a class, and returns:
3004 0) if the object is an instance of the class,
3005 1) if the object is not instance of the class,
3006 2) if the object is a proxy whose type cannot be determined */
3009 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3010 int obj_reg = src->dreg;
3011 int dreg = alloc_ireg (cfg);
3013 int klass_reg = alloc_preg (cfg);
3015 NEW_BBLOCK (cfg, true_bb);
3016 NEW_BBLOCK (cfg, false_bb);
3017 NEW_BBLOCK (cfg, false2_bb);
3018 NEW_BBLOCK (cfg, end_bb);
3019 NEW_BBLOCK (cfg, no_proxy_bb);
3021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3022 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface: try the interface cast first; on failure check whether the
 * object is a transparent proxy with custom type info (result 2). */
3024 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3025 NEW_BBLOCK (cfg, interface_fail_bb);
3027 tmp_reg = alloc_preg (cfg);
3028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3029 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3030 MONO_START_BB (cfg, interface_fail_bb);
3031 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3033 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3035 tmp_reg = alloc_preg (cfg);
3036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: distinguish proxy vs. ordinary object first. */
3040 tmp_reg = alloc_preg (cfg);
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3042 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3044 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3045 tmp_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3049 tmp_reg = alloc_preg (cfg);
3050 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3051 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3052 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3054 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3057 MONO_START_BB (cfg, no_proxy_bb);
3059 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result encoding: false -> 1, proxy-undetermined -> 2, true -> 0. */
3062 MONO_START_BB (cfg, false_bb);
3064 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3067 MONO_START_BB (cfg, false2_bb);
3069 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3070 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3072 MONO_START_BB (cfg, true_bb);
3074 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3076 MONO_START_BB (cfg, end_bb);
3079 MONO_INST_NEW (cfg, ins, OP_ICONST);
3081 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Expansion of the remoting-aware castclass opcode (see the comment
 *   below for the 0/1 result encoding and exception behavior). Proxies
 *   whose type cannot be determined locally yield 1 instead of throwing.
 */
3087 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3089 /* This opcode takes as input an object reference and a class, and returns:
3090 0) if the object is an instance of the class,
3091 1) if the object is a proxy whose type cannot be determined
3092 an InvalidCastException exception is thrown otherwhise*/
3095 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3096 int obj_reg = src->dreg;
3097 int dreg = alloc_ireg (cfg);
3098 int tmp_reg = alloc_preg (cfg);
3099 int klass_reg = alloc_preg (cfg);
3101 NEW_BBLOCK (cfg, end_bb);
3102 NEW_BBLOCK (cfg, ok_result_bb);
/* Null casts always succeed. */
3104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3105 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Interface: failed iface cast is only tolerated for a transparent proxy
 * carrying custom type info; anything else throws. */
3107 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3108 NEW_BBLOCK (cfg, interface_fail_bb);
3110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3111 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3112 MONO_START_BB (cfg, interface_fail_bb);
3113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3115 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3117 tmp_reg = alloc_preg (cfg);
3118 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3120 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3122 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3126 NEW_BBLOCK (cfg, no_proxy_bb);
3128 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3130 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3132 tmp_reg = alloc_preg (cfg);
3133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3136 tmp_reg = alloc_preg (cfg);
3137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3139 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3141 NEW_BBLOCK (cfg, fail_1_bb);
3143 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3145 MONO_START_BB (cfg, fail_1_bb);
3147 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3150 MONO_START_BB (cfg, no_proxy_bb);
3152 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3155 MONO_START_BB (cfg, ok_result_bb);
3157 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3159 MONO_START_BB (cfg, end_bb);
3162 MONO_INST_NEW (cfg, ins, OP_ICONST);
3164 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *   Emit inline IR equivalent to mono_delegate_ctor (): allocate the
 *   delegate object of KLASS and fill in its target, method, method_code
 *   (JIT only, via a per-domain code slot) and invoke_impl (the delegate
 *   trampoline) fields. Returns the new delegate instruction.
 */
3169 static G_GNUC_UNUSED MonoInst*
3170 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3172 gpointer *trampoline;
3173 MonoInst *obj, *method_ins, *tramp_ins;
3177 obj = handle_alloc (cfg, klass, FALSE);
3179 /* Inline the contents of mono_delegate_ctor */
3181 /* Set target field */
3182 /* Optimize away setting of NULL target */
3183 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3184 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3186 /* Set method field */
3187 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3188 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3191 * To avoid looking up the compiled code belonging to the target method
3192 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3193 * store it, and we fill it after the method has been compiled.
3195 if (!cfg->compile_aot) {
3196 MonoInst *code_slot_ins;
3198 domain = mono_domain_get ();
/* domain lock protects method_code_hash creation and insertion */
3199 mono_domain_lock (domain);
3200 if (!domain->method_code_hash)
3201 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3202 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3204 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3205 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3207 mono_domain_unlock (domain);
3209 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3210 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3213 /* Set invoke_impl field */
3214 trampoline = mono_create_delegate_trampoline (klass);
3215 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3216 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3218 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit the 'newarr'/multi-dim array allocation as a vararg call to the
 *   rank-specific array-new icall wrapper; SP holds the dimension args.
 */
3224 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3226 MonoJitICallInfo *info;
3228 /* Need to register the icall so it gets an icall wrapper */
3229 info = mono_get_array_new_va_icall (rank);
3231 cfg->flags |= MONO_CFG_HAS_VARARGS;
3233 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3234 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   If a GOT variable exists and is not yet set up, emit an
 *   OP_LOAD_GOTADDR at the very start of the entry basic block to
 *   initialize it, and a dummy use in the exit block so the variable stays
 *   live across the whole method (real uses may only appear in the
 *   back end).
 */
3238 mono_emit_load_got_addr (MonoCompile *cfg)
3240 MonoInst *getaddr, *dummy_use;
3242 if (!cfg->got_var || cfg->got_var_allocated)
3245 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3246 getaddr->dreg = cfg->got_var->dreg;
3248 /* Add it to the start of the first bblock */
3249 if (cfg->bb_entry->code) {
3250 getaddr->next = cfg->bb_entry->code;
3251 cfg->bb_entry->code = getaddr;
3254 MONO_ADD_INS (cfg->bb_entry, getaddr);
3256 cfg->got_var_allocated = TRUE;
3259 * Add a dummy use to keep the got_var alive, since real uses might
3260 * only be generated by the back ends.
3261 * Add it to end_bblock, so the variable's lifetime covers the whole
3263 * It would be better to make the usage of the got var explicit in all
3264 * cases when the backend needs it (i.e. calls, throw etc.), so this
3265 * wouldn't be needed.
3267 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3268 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* True if IP points at a stloc opcode (stloc.0 .. stloc.3 or stloc.s). */
3271 #define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Rejections include: generic sharing active; runtime/internal-call/
 *   noinline/synchronized/pinvoke methods; MarshalByRef classes; methods
 *   with exception clauses or no header; bodies larger than the
 *   MONO_INLINELIMIT env var (or INLINE_LENGTH_LIMIT); classes whose cctor
 *   still needs to run (unless it can be run right away); methods with
 *   declarative security; and, under MONO_ARCH_SOFT_FLOAT, methods taking
 *   or returning R4.
 */
3274 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3276 MonoMethodHeader *header = mono_method_get_header (method);
3278 #ifdef MONO_ARCH_SOFT_FLOAT
3279 MonoMethodSignature *sig = mono_method_signature (method);
3283 if (cfg->generic_sharing_context)
3286 #ifdef MONO_ARCH_HAVE_LMF_OPS
3287 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3288 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3289 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3293 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3294 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3295 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3296 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3297 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3298 (method->klass->marshalbyref) ||
3299 !header || header->num_clauses)
3302 /* also consider num_locals? */
3303 /* Do the size check early to avoid creating vtables */
3304 if (getenv ("MONO_INLINELIMIT")) {
3305 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3308 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3312 * if we can initialize the class of the method right away, we do,
3313 * otherwise we don't allow inlining if the class needs initialization,
3314 * since it would mean inserting a call to mono_runtime_class_init()
3315 * inside the inlined code
3317 if (!(cfg->opt & MONO_OPT_SHARED)) {
3318 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3319 if (cfg->run_cctors && method->klass->has_cctor) {
3320 if (!method->klass->runtime_info)
3321 /* No vtable created yet */
3323 vtable = mono_class_vtable (cfg->domain, method->klass);
3326 /* This makes so that inline cannot trigger */
3327 /* .cctors: too many apps depend on them */
3328 /* running with a specific order... */
3329 if (! vtable->initialized)
3331 mono_runtime_class_init (vtable);
3333 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3334 if (!method->klass->runtime_info)
3335 /* No vtable created yet */
3337 vtable = mono_class_vtable (cfg->domain, method->klass);
3340 if (!vtable->initialized)
3345 * If we're compiling for shared code
3346 * the cctor will need to be run at aot method load time, for example,
3347 * or at the end of the compilation of the inlining method.
3349 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3354 * CAS - do not inline methods with declarative security
3355 * Note: this has to be before any possible return TRUE;
3357 if (mono_method_has_declsec (method))
3360 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods dealing in R4 values. */
3362 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3364 for (i = 0; i < sig->param_count; ++i)
3365 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static field access must trigger running the class
 *   constructor of VTABLE's class: not if it already ran (JIT only), not
 *   for beforefieldinit classes, not if the class has no pending cctor,
 *   and not from an instance method of the same class (the cctor already
 *   ran before the method could be called).
 */
3373 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3375 if (vtable->initialized && !cfg->compile_aot)
3378 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3381 if (!mono_class_needs_cctor_run (vtable->klass, method))
3384 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3385 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit the address computation for a one-dimensional array element of
 *   KLASS: bounds check against max_length, then
 *   arr + index * element_size + offsetof (MonoArray, vector).
 *   On x86/amd64 a single LEA is used when the element size is a small
 *   power of two. On 64-bit the 32-bit index is sign-extended first.
 */
3392 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3396 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3398 mono_class_init (klass);
3399 size = mono_class_array_element_size (klass);
3401 mult_reg = alloc_preg (cfg);
3402 array_reg = arr->dreg;
3403 index_reg = index->dreg;
3405 #if SIZEOF_VOID_P == 8
3406 /* The array reg is 64 bits but the index reg is only 32 */
3407 index2_reg = alloc_preg (cfg);
3408 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3410 index2_reg = index_reg;
3413 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3415 #if defined(__i386__) || defined(__x86_64__)
3416 if (size == 1 || size == 2 || size == 4 || size == 8) {
3417 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3419 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3420 ins->type = STACK_PTR;
/* Generic path: explicit multiply + adds. */
3426 add_reg = alloc_preg (cfg);
3428 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3429 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3430 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3431 ins->type = STACK_PTR;
3432 MONO_ADD_INS (cfg->cbb, ins);
3437 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2]
 * of the two-dimensional array ARR, checking both indexes against the
 * per-dimension lower bound and length stored in the MonoArrayBounds
 * vector.  Only compiled when the arch has a real multiply (see the
 * #ifndef above).  Returns the address instruction.
 * NOTE(review): some interior lines are elided in this view.
 */
3439 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3441 int bounds_reg = alloc_preg (cfg);
3442 int add_reg = alloc_preg (cfg);
3443 int mult_reg = alloc_preg (cfg);
3444 int mult2_reg = alloc_preg (cfg);
3445 int low1_reg = alloc_preg (cfg);
3446 int low2_reg = alloc_preg (cfg);
3447 int high1_reg = alloc_preg (cfg);
3448 int high2_reg = alloc_preg (cfg);
3449 int realidx1_reg = alloc_preg (cfg);
3450 int realidx2_reg = alloc_preg (cfg);
3451 int sum_reg = alloc_preg (cfg);
3456 mono_class_init (klass);
3457 size = mono_class_array_element_size (klass);
3459 index1 = index_ins1->dreg;
3460 index2 = index_ins2->dreg;
3462 /* range checking */
3463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3464 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; throw if >= length
 * (unsigned compare also catches negative results). */
3466 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3467 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3468 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3469 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3470 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3471 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3472 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds entry. */
3474 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3475 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3476 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3478 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3479 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3480 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector). */
3482 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3483 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3486 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3488 ins->type = STACK_MP;
3490 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array element access on behalf
 * of CMETHOD (an Address/Get/Set array helper); SP holds the array and
 * index values.  Uses the fast 1-dim and (where available) 2-dim
 * inline paths, otherwise falls back to a marshalling wrapper call.
 * IS_SET excludes the trailing value argument from the rank count.
 */
3501 MonoMethod *addr_method;
3504 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3507 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3509 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3510 /* emit_ldelema_2 depends on OP_LMUL */
3511 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3512 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/size. */
3516 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3517 addr_method = mono_marshal_get_array_address (rank, element_size);
3518 addr = mono_emit_method_call (cfg, addr_method, addr_method->signature, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Recognize well-known corlib methods (String, Object, Array,
 * RuntimeHelpers, Thread, Interlocked, Math, Debugger, Environment)
 * and emit an intrinsic IR sequence instead of a call.  Returns the
 * resulting instruction, or falls through to the arch-specific hook.
 * NOTE(review): some interior lines are elided in this view.
 *
 * Fix: in the Interlocked.Add branch the result stack type was chosen
 * by comparing OPCODE against OP_ATOMIC_ADD_I4, but OPCODE can only be
 * OP_ATOMIC_ADD_NEW_I4/I8 here, so the I4 case was mis-typed as
 * STACK_I8.  Now compares OP_ATOMIC_ADD_NEW_I4, matching the
 * Increment/Decrement branches.
 */
3524 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3526 MonoInst *ins = NULL;
3528 static MonoClass *runtime_helpers_class = NULL;
3529 if (! runtime_helpers_class)
3530 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3531 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3533 if (cmethod->klass == mono_defaults.string_class) {
3534 if (strcmp (cmethod->name, "get_Chars") == 0) {
3535 int dreg = alloc_ireg (cfg);
3536 int index_reg = alloc_preg (cfg);
3537 int mult_reg = alloc_preg (cfg);
3538 int add_reg = alloc_preg (cfg);
3540 #if SIZEOF_VOID_P == 8
3541 /* The array reg is 64 bits but the index reg is only 32 */
3542 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3544 index_reg = args [1]->dreg;
3546 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3548 #if defined(__i386__) || defined(__x86_64__)
3549 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3550 add_reg = ins->dreg;
3551 /* Avoid a warning */
3553 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3557 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3558 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3559 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3561 type_from_op (ins, NULL, NULL);
3563 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3564 int dreg = alloc_ireg (cfg);
3565 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3566 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3567 type_from_op (ins, NULL, NULL);
3570 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3571 int mult_reg = alloc_preg (cfg);
3572 int add_reg = alloc_preg (cfg);
3574 /* The corlib functions check for oob already. */
3575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3576 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3577 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3580 } else if (cmethod->klass == mono_defaults.object_class) {
3582 if (strcmp (cmethod->name, "GetType") == 0) {
3583 int dreg = alloc_preg (cfg);
3584 int vt_reg = alloc_preg (cfg);
3585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3586 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3587 type_from_op (ins, NULL, NULL);
3590 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3591 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3592 int dreg = alloc_ireg (cfg);
3593 int t1 = alloc_ireg (cfg);
/* Knuth multiplicative hash of the object address (non-moving GC only). */
3595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3596 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3597 ins->type = STACK_I4;
3601 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3602 MONO_INST_NEW (cfg, ins, OP_NOP);
3603 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
3607 } else if (cmethod->klass == mono_defaults.array_class) {
3608 if (cmethod->name [0] != 'g')
3611 if (strcmp (cmethod->name, "get_Rank") == 0) {
3612 int dreg = alloc_ireg (cfg);
3613 int vtable_reg = alloc_preg (cfg);
3614 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3615 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3616 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3617 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3618 type_from_op (ins, NULL, NULL);
3621 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3622 int dreg = alloc_ireg (cfg);
3624 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3625 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3626 type_from_op (ins, NULL, NULL);
3631 } else if (cmethod->klass == runtime_helpers_class) {
3633 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3634 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3638 } else if (cmethod->klass == mono_defaults.thread_class) {
3639 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3640 ins->dreg = alloc_preg (cfg);
3641 ins->type = STACK_OBJ;
3642 MONO_ADD_INS (cfg->cbb, ins);
3644 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3645 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3646 MONO_ADD_INS (cfg->cbb, ins);
3649 } else if (mini_class_is_system_array (cmethod->klass) &&
3650 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3651 MonoInst *addr, *store, *load;
3652 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3654 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3655 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3656 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked intrinsics --- */
3658 } else if (cmethod->klass->image == mono_defaults.corlib &&
3659 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3660 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3663 #if SIZEOF_VOID_P == 8
3664 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3665 /* 64 bit reads are already atomic */
3666 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3667 ins->dreg = mono_alloc_preg (cfg);
3668 ins->inst_basereg = args [0]->dreg;
3669 ins->inst_offset = 0;
3670 MONO_ADD_INS (cfg->cbb, ins);
3674 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3675 if (strcmp (cmethod->name, "Increment") == 0) {
3676 MonoInst *ins_iconst;
3679 if (fsig->params [0]->type == MONO_TYPE_I4)
3680 opcode = OP_ATOMIC_ADD_NEW_I4;
3681 #if SIZEOF_VOID_P == 8
3682 else if (fsig->params [0]->type == MONO_TYPE_I8)
3683 opcode = OP_ATOMIC_ADD_NEW_I8;
3686 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3687 ins_iconst->inst_c0 = 1;
3688 ins_iconst->dreg = mono_alloc_ireg (cfg);
3689 MONO_ADD_INS (cfg->cbb, ins_iconst);
3691 MONO_INST_NEW (cfg, ins, opcode);
3692 ins->dreg = mono_alloc_ireg (cfg);
3693 ins->inst_basereg = args [0]->dreg;
3694 ins->inst_offset = 0;
3695 ins->sreg2 = ins_iconst->dreg;
3696 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3697 MONO_ADD_INS (cfg->cbb, ins);
3699 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3700 MonoInst *ins_iconst;
3703 if (fsig->params [0]->type == MONO_TYPE_I4)
3704 opcode = OP_ATOMIC_ADD_NEW_I4;
3705 #if SIZEOF_VOID_P == 8
3706 else if (fsig->params [0]->type == MONO_TYPE_I8)
3707 opcode = OP_ATOMIC_ADD_NEW_I8;
3710 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3711 ins_iconst->inst_c0 = -1;
3712 ins_iconst->dreg = mono_alloc_ireg (cfg);
3713 MONO_ADD_INS (cfg->cbb, ins_iconst);
3715 MONO_INST_NEW (cfg, ins, opcode);
3716 ins->dreg = mono_alloc_ireg (cfg);
3717 ins->inst_basereg = args [0]->dreg;
3718 ins->inst_offset = 0;
3719 ins->sreg2 = ins_iconst->dreg;
3720 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3721 MONO_ADD_INS (cfg->cbb, ins);
3723 } else if (strcmp (cmethod->name, "Add") == 0) {
3726 if (fsig->params [0]->type == MONO_TYPE_I4)
3727 opcode = OP_ATOMIC_ADD_NEW_I4;
3728 #if SIZEOF_VOID_P == 8
3729 else if (fsig->params [0]->type == MONO_TYPE_I8)
3730 opcode = OP_ATOMIC_ADD_NEW_I8;
3734 MONO_INST_NEW (cfg, ins, opcode);
3735 ins->dreg = mono_alloc_ireg (cfg);
3736 ins->inst_basereg = args [0]->dreg;
3737 ins->inst_offset = 0;
3738 ins->sreg2 = args [1]->dreg;
/* Was OP_ATOMIC_ADD_I4, which opcode can never equal here; the I4
 * result was therefore typed STACK_I8.  Match Increment/Decrement. */
3739 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3740 MONO_ADD_INS (cfg->cbb, ins);
3743 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3745 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3746 if (strcmp (cmethod->name, "Exchange") == 0) {
3749 if (fsig->params [0]->type == MONO_TYPE_I4)
3750 opcode = OP_ATOMIC_EXCHANGE_I4;
3751 #if SIZEOF_VOID_P == 8
3752 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3753 (fsig->params [0]->type == MONO_TYPE_I) ||
3754 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3755 opcode = OP_ATOMIC_EXCHANGE_I8;
3757 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3758 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3759 opcode = OP_ATOMIC_EXCHANGE_I4;
3764 MONO_INST_NEW (cfg, ins, opcode);
3765 ins->dreg = mono_alloc_ireg (cfg);
3766 ins->inst_basereg = args [0]->dreg;
3767 ins->inst_offset = 0;
3768 ins->sreg2 = args [1]->dreg;
3769 MONO_ADD_INS (cfg->cbb, ins);
3771 switch (fsig->params [0]->type) {
3773 ins->type = STACK_I4;
3777 ins->type = STACK_I8;
3779 case MONO_TYPE_OBJECT:
3780 ins->type = STACK_OBJ;
3783 g_assert_not_reached ();
3786 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3788 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3790 * Can't implement CompareExchange methods this way since they have
3791 * three arguments. We can implement one of the common cases, where the new
3792 * value is a constant.
3794 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3795 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3796 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3797 ins->dreg = alloc_ireg (cfg);
3798 ins->sreg1 = args [0]->dreg;
3799 ins->sreg2 = args [1]->dreg;
3800 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3801 ins->type = STACK_I4;
3802 MONO_ADD_INS (cfg->cbb, ins);
3804 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3806 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* --- Misc corlib intrinsics (Debugger.Break, Environment) --- */
3810 } else if (cmethod->klass->image == mono_defaults.corlib) {
3811 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3812 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3813 MONO_INST_NEW (cfg, ins, OP_BREAK);
3814 MONO_ADD_INS (cfg->cbb, ins);
3817 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3818 && strcmp (cmethod->klass->name, "Environment") == 0) {
3819 #ifdef PLATFORM_WIN32
3820 EMIT_NEW_ICONST (cfg, ins, 1);
3822 EMIT_NEW_ICONST (cfg, ins, 0);
3826 } else if (cmethod->klass == mono_defaults.math_class) {
3828 * There is general branches code for Min/Max, but it does not work for
3830 * http://everything2.com/?node_id=1051618
/* Fall back to the arch-specific intrinsic recognizer. */
3834 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3838 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-friendly replacement when one
 * exists; currently only String.InternalAllocateStr is redirected to
 * the GC's managed allocator.  Returns the call instruction, or NULL
 * (elided here) when no redirection applies.
 */
3841 inline static MonoInst*
3842 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3843 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3845 if (method->klass == mono_defaults.string_class) {
3846 /* managed string allocation support */
3847 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3848 MonoInst *iargs [2];
3849 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3850 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (string vtable, length). */
3853 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3854 iargs [1] = args [0];
3855 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, this);
/*
 * mono_save_args:
 *
 *   Store the SIG->param_count (+ this) values on the stack SP into
 * freshly created local variables, filling ARGS; used when inlining so
 * the callee body reads stable locals instead of stack slots.
 */
3862 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp, MonoInst **args)
3864 MonoInst *store, *temp;
3867 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when present; its static type comes from the stack. */
3868 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3871 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3872 * would be different than the MonoInst's used to represent arguments, and
3873 * the ldelema implementation can't deal with that.
3874 * Solution: When ldelema is used on an inline argument, create a var for
3875 * it, emit ldelema on that var, and emit the saving code below in
3876 * inline_method () if needed.
3878 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3880 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, *sp);
3881 store->cil_code = sp [0]->cil_code;
3886 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3887 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3889 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/* Lazily-initialized prefix filter for inlinable callees; read once
 * from the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (debug aid). */
3891 mono_inline_called_method_name_limit = NULL;
/*
 * check_inline_called_method_name_limit:
 *   Return TRUE iff CALLED_METHOD's full name starts with the
 * configured prefix (empty prefix matches everything).
 */
3892 static gboolean check_inline_called_method_name_limit (MonoMethod *called_method) {
3893 char *called_method_name = mono_method_full_name (called_method, TRUE);
3896 if (mono_inline_called_method_name_limit == NULL) {
3897 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3898 if (limit_string != NULL) {
3899 mono_inline_called_method_name_limit = limit_string;
3901 mono_inline_called_method_name_limit = (char *) "";
3905 strncmp_result = strncmp (called_method_name, mono_inline_called_method_name_limit, strlen (mono_inline_called_method_name_limit));
3906 g_free (called_method_name);
3908 //return (strncmp_result <= 0);
3909 return (strncmp_result == 0);
3913 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/* Lazily-initialized prefix filter for inlining callers; read once
 * from the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var (debug aid). */
3915 mono_inline_caller_method_name_limit = NULL;
/*
 * check_inline_caller_method_name_limit:
 *   Return TRUE iff CALLER_METHOD's full name starts with the
 * configured prefix (empty prefix matches everything).
 */
3916 static gboolean check_inline_caller_method_name_limit (MonoMethod *caller_method) {
3917 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3920 if (mono_inline_caller_method_name_limit == NULL) {
3921 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3922 if (limit_string != NULL) {
3923 mono_inline_caller_method_name_limit = limit_string;
3925 mono_inline_caller_method_name_limit = (char *) "";
3929 strncmp_result = strncmp (caller_method_name, mono_inline_caller_method_name_limit, strlen (mono_inline_caller_method_name_limit));
3930 g_free (caller_method_name);
3932 //return (strncmp_result <= 0);
3933 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current method at IP.  Saves the parts of
 * CFG that mono_method_to_ir2 () will clobber, converts the callee into
 * new bblocks between SBBLOCK and EBBLOCK, then either commits (cost
 * below the threshold, or INLINE_ALLWAYS) or rolls back.  Returns the
 * inline cost on success (exact return lines are elided in this view).
 * NOTE(review): several interior lines are elided, including the
 * variable declarations for 'i' and 'costs' and some closing braces.
 */
3938 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3939 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3941 MonoInst *ins, *rvar = NULL;
3942 MonoMethodHeader *cheader;
3943 MonoBasicBlock *ebblock, *sbblock;
3945 MonoMethod *prev_inlined_method;
3946 MonoInst **prev_locals, **prev_args;
3947 guint prev_real_offset;
3948 GHashTable *prev_cbb_hash;
3949 MonoBasicBlock **prev_cil_offset_to_bb;
3950 MonoBasicBlock *prev_cbb;
3951 unsigned char* prev_cil_start;
3952 guint32 prev_cil_offset_to_bb_len;
3953 MonoMethod *prev_current_method;
3954 MonoGenericContext *prev_generic_context;
3956 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging inliner issues. */
3958 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3959 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3962 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3963 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3967 if (cfg->verbose_level > 2)
3968 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3970 if (!cmethod->inline_info) {
3971 mono_jit_stats.inlineable_methods++;
3972 cmethod->inline_info = 1;
3974 /* allocate space to store the return value */
3975 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3976 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3979 /* allocate local variables */
3980 cheader = mono_method_get_header (cmethod);
3981 prev_locals = cfg->locals;
3982 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3983 for (i = 0; i < cheader->num_locals; ++i)
3984 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3986 prev_args = cfg->args;
3988 /* allocate start and end blocks */
3989 /* This is needed so if the inline is aborted, we can clean up */
3990 NEW_BBLOCK (cfg, sbblock);
3991 sbblock->real_offset = real_offset;
3993 NEW_BBLOCK (cfg, ebblock);
3994 ebblock->block_num = cfg->num_bblocks++;
3995 ebblock->real_offset = real_offset;
/* Save the CFG state that IR conversion of the callee will overwrite. */
3997 prev_inlined_method = cfg->inlined_method;
3998 cfg->inlined_method = cmethod;
3999 cfg->ret_var_set = FALSE;
4000 prev_real_offset = cfg->real_offset;
4001 prev_cbb_hash = cfg->cbb_hash;
4002 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4003 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4004 prev_cil_start = cfg->cil_start;
4005 prev_cbb = cfg->cbb;
4006 prev_current_method = cfg->current_method;
4007 prev_generic_context = cfg->generic_context;
4009 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
/* Restore the saved CFG state regardless of success. */
4011 cfg->inlined_method = prev_inlined_method;
4012 cfg->real_offset = prev_real_offset;
4013 cfg->cbb_hash = prev_cbb_hash;
4014 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4015 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4016 cfg->cil_start = prev_cil_start;
4017 cfg->locals = prev_locals;
4018 cfg->args = prev_args;
4019 cfg->current_method = prev_current_method;
4020 cfg->generic_context = prev_generic_context;
/* Commit when conversion succeeded and was cheap enough (or forced). */
4022 if ((costs >= 0 && costs < 60) || inline_allways) {
4023 if (cfg->verbose_level > 2)
4024 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4026 mono_jit_stats.inlined_methods++;
4028 /* always add some code to avoid block split failures */
4029 MONO_INST_NEW (cfg, ins, OP_NOP);
4030 MONO_ADD_INS (prev_cbb, ins);
4032 prev_cbb->next_bb = sbblock;
4033 link_bblock (cfg, prev_cbb, sbblock);
4036 * Get rid of the begin and end bblocks if possible to aid local
4039 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4041 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4042 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4044 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4045 MonoBasicBlock *prev = ebblock->in_bb [0];
4046 mono_merge_basic_blocks (cfg, prev, ebblock);
4054 * If the inlined method contains only a throw, then the ret var is not
4055 * set, so set it to a dummy value.
4057 if (!cfg->ret_var_set) {
4058 static double r8_0 = 0.0;
4060 switch (rvar->type) {
4062 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4065 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4070 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4073 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4074 ins->type = STACK_R8;
4075 ins->inst_p0 = (void*)&r8_0;
4076 ins->dreg = rvar->dreg;
4077 MONO_ADD_INS (cfg->cbb, ins);
4080 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4083 g_assert_not_reached ();
4087 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Rollback path: the inline was aborted or too expensive. */
4092 if (cfg->verbose_level > 2)
4093 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4094 cfg->exception_type = MONO_EXCEPTION_NONE;
4095 mono_loader_clear_error ();
4097 /* This gets rid of the newly added bblocks */
4098 cfg->cbb = prev_cbb;
4104 * Some of these comments may well be out-of-date.
4105 * Design decisions: we do a single pass over the IL code (and we do bblock
4106 * splitting/merging in the few cases when it's required: a back jump to an IL
4107 * address that was not already seen as bblock starting point).
4108 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4109 * Complex operations are decomposed into simpler ones right away. We need to let the
4110 * arch-specific code peek and poke inside this process somehow (except when the
4111 * optimizations can take advantage of the full semantic info of coarse opcodes).
4112 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4113 * MonoInst->opcode initially is the IL opcode or some simplification of that
4114 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4115 * opcode with value bigger than OP_LAST.
4116 * At this point the IR can be handed over to an interpreter, a dumb code generator
4117 * or to the optimizing code generator that will translate it to SSA form.
4119 * Profiling directed optimizations.
4120 * We may compile by default with few or no optimizations and instrument the code
4121 * or the user may indicate what methods to optimize the most either in a config file
4122 * or through repeated runs where the compiler applies offline the optimizations to
4123 * each method and then decides if it was worth it.
4126 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4127 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4128 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4129 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4130 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4131 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4132 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4133 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4135 /* offset from br.s -> br like opcodes */
4136 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the CIL address IP belongs to bblock BB, i.e. the
 * offset-to-bblock map has no entry there or maps it to BB itself.
 */
4139 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4141 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4143 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the CIL stream [START, END) and create a bblock (via
 * GET_BBLOCK) at every branch target and at the instruction following
 * each branch, so the main conversion loop knows all block boundaries
 * in advance.  POS receives the error position on malformed input.
 * NOTE(review): several case labels/fallthrough lines and the argument
 * size adjustments are elided in this view.
 */
4147 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4149 unsigned char *ip = start;
4150 unsigned char *target;
4153 MonoBasicBlock *bblock;
4154 const MonoOpcode *opcode;
4157 cli_addr = ip - start;
4158 i = mono_opcode_value ((const guint8 **)&ip, end);
4161 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding to find the next instruction and
 * any branch targets. */
4162 switch (opcode->argument) {
4163 case MonoInlineNone:
4166 case MonoInlineString:
4167 case MonoInlineType:
4168 case MonoInlineField:
4169 case MonoInlineMethod:
4172 case MonoShortInlineR:
4179 case MonoShortInlineVar:
4180 case MonoShortInlineI:
4183 case MonoShortInlineBrTarget:
4184 target = start + cli_addr + 2 + (signed char)ip [1];
4185 GET_BBLOCK (cfg, bblock, target);
4188 GET_BBLOCK (cfg, bblock, ip);
4190 case MonoInlineBrTarget:
4191 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4192 GET_BBLOCK (cfg, bblock, target);
4195 GET_BBLOCK (cfg, bblock, ip);
4197 case MonoInlineSwitch: {
4198 guint32 n = read32 (ip + 1);
/* The switch operand is n 32-bit targets relative to its own end. */
4201 cli_addr += 5 + 4 * n;
4202 target = start + cli_addr;
4203 GET_BBLOCK (cfg, bblock, target);
4205 for (j = 0; j < n; ++j) {
4206 target = start + cli_addr + (gint32)read32 (ip);
4207 GET_BBLOCK (cfg, bblock, target);
4217 g_assert_not_reached ();
/* Mark blocks ending in a throw as out-of-line (cold) code. */
4220 if (i == CEE_THROW) {
4221 unsigned char *bb_start = ip - 1;
4223 /* Find the start of the bblock containing the throw */
4225 while ((bb_start >= start) && !bblock) {
4226 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4230 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in M's image/context; for wrapper
 * methods the token indexes the wrapper's own data instead of
 * metadata.  Open constructed types are permitted (cf. mini_get_method).
 */
4239 static inline MonoMethod *
4240 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4244 if (m->wrapper_type != MONO_WRAPPER_NONE)
4245 return mono_method_get_wrapper_data (m, token);
4247 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with
 * generic sharing, rejects methods on open constructed types (the
 * rejection action itself is elided in this view).
 */
4252 static inline MonoMethod *
4253 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4255 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4257 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in METHOD's image/context (wrapper
 * methods store the class in their wrapper data) and ensure it is
 * initialized before returning it.
 */
4263 static inline MonoClass*
4264 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4268 if (method->wrapper_type != MONO_WRAPPER_NONE)
4269 klass = mono_method_get_wrapper_data (method, token);
4271 klass = mono_class_get_full (method->klass->image, token, context);
4273 mono_class_init (klass);
4278 * Returns TRUE if the JIT should abort inlining because "callee"
4279 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for a CALLER -> CALLEE call.  On an ECMA
 * linkdemand failure, code throwing a SecurityException is emitted in
 * place of the call; otherwise a pending failure is recorded on the
 * compile so it surfaces later.  (Return statements elided here.)
 */
4282 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Inlined callees with declsec must not be inlined - abort. */
4286 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4290 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4291 if (result == MONO_JIT_SECURITY_OK)
4294 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4295 /* Generate code to throw a SecurityException before the actual call/link */
4296 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4299 NEW_ICONST (cfg, args [0], 4);
4300 NEW_METHODCONST (cfg, args [1], caller);
4301 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, mono_method_signature (secman->linkdemandsecurityexception), args, NULL);
4302 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4303 /* don't hide previous results */
4304 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4305 cfg->exception_data = result;
/*
 * method_access_exception:
 *   Lazily look up and cache the SecurityManager.MethodAccessException
 * helper (2 args) used to raise method-access violations from JITted
 * code.  (The return line is elided in this view.)
 */
4313 method_access_exception (void)
4315 static MonoMethod *method = NULL;
4318 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4319 method = mono_class_get_method_from_name (secman->securitymanager,
4320 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *   Emit a call to the SecurityManager helper that throws a
 * MethodAccessException for the CALLER -> CALLEE pair, in place of the
 * disallowed call.
 */
4327 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4328 MonoBasicBlock *bblock, unsigned char *ip)
4330 MonoMethod *thrower = method_access_exception ();
4333 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4334 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4335 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), args, NULL);
/*
 * verification_exception:
 *   Lazily look up and cache the SecurityManager.VerificationException
 * helper (0 args).  (The return line is elided in this view.)
 */
4339 verification_exception (void)
4341 static MonoMethod *method = NULL;
4344 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4345 method = mono_class_get_method_from_name (secman->securitymanager,
4346 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *   Emit a call that throws a VerificationException at IP.
 */
4353 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4355 MonoMethod *thrower = verification_exception ();
4357 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: a caller may only invoke a callee of a
 * lower-or-equal security level unless either side is marked
 * safe-critical; otherwise emit a MethodAccessException throw at the
 * call site.  (Some lines, e.g. the is_safe handling, are elided.)
 */
4361 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4362 MonoBasicBlock *bblock, unsigned char *ip)
4364 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4365 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4366 gboolean is_safe = TRUE;
4368 if (!(caller_level >= callee_level ||
4369 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4370 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4375 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *   Test hook: methods literally named "unsafeMethod" are treated as
 * unsafe.  (Surrounding lines, including the return values, are elided
 * in this view.)
 */
4379 method_is_safe (MonoMethod *method)
4382 if (strcmp (method->name, "unsafeMethod") == 0)
4389 * Check that the IL instructions at ip are the array initialization
4390 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence after a newarr of KLASS
 * with LEN elements.  On a match, returns a pointer to the RVA-mapped
 * field data (or, for AOT, the RVA itself) and stores the byte size in
 * OUT_SIZE; the failure returns are elided in this view.
 */
4393 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4396 * newarr[System.Int32]
4398 * ldtoken field valuetype ...
4399 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (Field). */
4401 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4402 guint32 token = read32 (ip + 7);
4403 guint32 field_token = read32 (ip + 2);
4404 guint32 field_index = field_token & 0xffffff;
4406 const char *data_ptr;
4408 MonoMethod *cmethod;
4409 MonoClass *dummy_class;
4410 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4416 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Must really be RuntimeHelpers::InitializeArray from corlib. */
4419 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element size per type; multi-byte types only on little endian. */
4421 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4422 case MONO_TYPE_BOOLEAN:
4426 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4427 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4428 case MONO_TYPE_CHAR:
4438 return NULL; /* stupid ARM FP swapped format */
/* Data blob must be at least as large as the initializer needs. */
4448 if (size > mono_type_size (field->type, &dummy_align))
4451 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4452 field_index = read32 (ip + 2) & 0xffffff;
4453 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4454 data_ptr = mono_image_rva_map (method->klass->image, rva);
4455 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4456 /* for aot code we do the lookup on load */
4457 if (aot && data_ptr)
4458 return GUINT_TO_POINTER (rva);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and showing the offending IL instruction at IP (or noting an empty body).
 * NOTE(review): a few lines are elided in this view.
 */
4465 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4467 char *method_fname = mono_method_full_name (method, TRUE);
/* An empty method body has no instruction to disassemble */
4470 if (mono_method_get_header (method)->code_size == 0)
4471 method_code = g_strdup ("method body is empty.");
4473 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4474 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4475 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* g_strdup_printf copied the strings, so the temporaries can be freed */
4476 g_free (method_fname);
4477 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-built managed EXCEPTION object on CFG so compilation aborts
 * with it. The pointer is registered as a GC root first, since cfg is
 * unmanaged memory and the exception object must be kept alive.
 */
4481 set_exception_object (MonoCompile *cfg, MonoException *exception)
4483 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
/* Register before storing so the GC never sees an untracked live reference */
4484 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4485 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Returns whether KLASS is a reference type, resolving generic type
 * variables through the generic-sharing context first so that a shared
 * type parameter is judged by its instantiated basic type.
 */
4489 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4493 if (cfg->generic_sharing_context)
/* Under generic sharing, map the type to its basic (shared) representation */
4494 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4496 type = &klass->byval_arg;
4497 return MONO_TYPE_IS_REFERENCE (type);
4501 * mono_decompose_array_access_opts:
4503 * Decompose array access opcodes.
/*
 * Lowers high-level array opcodes (array length load, bounds check, newarr)
 * into concrete loads/calls. Emits replacement code into a scratch bblock,
 * then splices it over the original instruction with mono_replace_ins ().
 * NOTE(review): several lines are elided in this view; statement order in
 * this pass is significant, so the code is left untouched.
 */
4506 mono_decompose_array_access_opts (MonoCompile *cfg)
4508 MonoBasicBlock *bb, *first_bb;
4511 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4512 * can be executed anytime. It should be run before decompose_long
4516 * Create a dummy bblock and emit code into it so we can use the normal
4517 * code generation macros.
4519 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4520 first_bb = cfg->cbb;
4522 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4524 MonoInst *prev = NULL;
4526 MonoInst *iargs [3];
/* Skip bblocks with nothing to decompose */
4529 if (!bb->has_array_access)
4532 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock before emitting replacement code */
4534 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4540 for (ins = bb->code; ins; ins = ins->next) {
4541 switch (ins->opcode) {
/* Array length: a plain i4 load from MonoArray::max_length */
4543 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4544 G_STRUCT_OFFSET (MonoArray, max_length));
4545 MONO_ADD_INS (cfg->cbb, dest);
4547 case OP_BOUNDS_CHECK:
/* Delegate to the architecture-specific bounds-check sequence */
4548 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared (domain-neutral) code cannot bake in a vtable, so it
 * calls mono_array_new with the domain and class as arguments */
4551 if (cfg->opt & MONO_OPT_SHARED) {
4552 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4553 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4554 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4555 iargs [2]->dreg = ins->sreg1;
4557 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4558 dest->dreg = ins->dreg;
/* Non-shared code can use the precomputed vtable of the rank-1 array class */
4560 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4563 NEW_VTABLECONST (cfg, iargs [0], vtable);
4564 MONO_ADD_INS (cfg->cbb, iargs [0]);
4565 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4566 iargs [1]->dreg = ins->sreg1;
4568 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4569 dest->dreg = ins->dreg;
/* The scratch bblock must not have been swapped out by the emission macros */
4576 g_assert (cfg->cbb == first_bb);
4578 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4579 /* Replace the original instruction with the new code sequence */
4581 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction */
4582 first_bb->code = first_bb->last_ins = NULL;
4583 first_bb->in_count = first_bb->out_count = 0;
4584 cfg->cbb = first_bb;
4591 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
#ifdef MONO_ARCH_SOFT_FLOAT
4604 * mono_handle_soft_float:
4606 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4607 * similar to long support on 32 bit platforms. 32 bit float values require special
4608 * handling when used as locals, arguments, and in calls.
4609 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Rewrites every fp opcode into integer/long equivalents or icalls, using
 * the same scratch-bblock + mono_replace_ins () scheme as the array pass.
 * NOTE(review): many lines are elided in this view; the exact rewrite order
 * matters, so the code is left byte-identical and only annotated.
 */
4612 mono_handle_soft_float (MonoCompile *cfg)
4614 MonoBasicBlock *bb, *first_bb;
4617 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4621 * Create a dummy bblock and emit code into it so we can use the normal
4622 * code generation macros.
4624 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4625 first_bb = cfg->cbb;
4627 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4629 MonoInst *prev = NULL;
4632 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* Reset the scratch bblock before emitting replacement code */
4634 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4640 for (ins = bb->code; ins; ins = ins->next) {
4641 const char *spec = INS_INFO (ins->opcode);
4643 /* Most fp operations are handled automatically by opcode emulation */
4645 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as an i8 constant */
4648 d.vald = *(double*)ins->inst_p0;
4649 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4654 /* We load the r8 value */
4655 d.vald = *(float*)ins->inst_p0;
4656 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long moves (double lives in a vreg pair) */
4660 ins->opcode = OP_LMOVE;
/* Extract one half of the vreg pair (pair halves are at sreg1+1/sreg1+2) */
4663 ins->opcode = OP_MOVE;
4664 ins->sreg1 = ins->sreg1 + 1;
4667 ins->opcode = OP_MOVE;
4668 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: set the long return registers from the vreg pair */
4671 int reg = ins->sreg1;
4673 ins->opcode = OP_SETLRET;
4675 ins->sreg1 = reg + 1;
4676 ins->sreg2 = reg + 2;
/* r8 memory accesses are plain i8 accesses under soft-float */
4679 case OP_LOADR8_MEMBASE:
4680 ins->opcode = OP_LOADI8_MEMBASE;
4682 case OP_STORER8_MEMBASE_REG:
4683 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: needs an r8->r4 conversion, done by the mono_fstore_r4 icall */
4685 case OP_STORER4_MEMBASE_REG: {
4686 MonoInst *iargs [2];
4689 /* Arg 1 is the double value */
4690 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4691 iargs [0]->dreg = ins->sreg1;
4693 /* Arg 2 is the address to store to */
4694 addr_reg = mono_alloc_preg (cfg);
4695 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4696 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: mono_fload_r4 reads the float and widens it to r8 */
4700 case OP_LOADR4_MEMBASE: {
4701 MonoInst *iargs [1];
4705 addr_reg = mono_alloc_preg (cfg);
4706 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4707 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4708 conv->dreg = ins->dreg;
4713 case OP_FCALL_MEMBASE: {
4714 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4: the raw bits come back in an int reg; convert after */
4715 if (call->signature->ret->type == MONO_TYPE_R4) {
4716 MonoCallInst *call2;
4717 MonoInst *iargs [1];
4720 /* Convert the call into a call returning an int */
4721 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4722 memcpy (call2, call, sizeof (MonoCallInst));
4723 switch (ins->opcode) {
4725 call2->inst.opcode = OP_CALL;
4728 call2->inst.opcode = OP_CALL_REG;
4730 case OP_FCALL_MEMBASE:
4731 call2->inst.opcode = OP_CALL_MEMBASE;
4734 g_assert_not_reached ();
4736 call2->inst.dreg = mono_alloc_ireg (cfg);
4737 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4739 /* FIXME: Optimize this */
4741 /* Emit an r4->r8 conversion */
4742 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4743 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4744 conv->dreg = ins->dreg;
/* Calls returning r8: simply retype the fp call as a long call */
4746 switch (ins->opcode) {
4748 ins->opcode = OP_LCALL;
4751 ins->opcode = OP_LCALL_REG;
4753 case OP_FCALL_MEMBASE:
4754 ins->opcode = OP_LCALL_MEMBASE;
4757 g_assert_not_reached ();
4763 MonoJitICallInfo *info;
4764 MonoInst *iargs [2];
4765 MonoInst *call, *cmp, *br;
4767 /* Convert fcompare+fbcc to icall+icompare+beq */
/* Look up the emulation icall registered for the following fp branch */
4769 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4772 /* Create dummy MonoInst's for the arguments */
4773 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4774 iargs [0]->dreg = ins->sreg1;
4775 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4776 iargs [1]->dreg = ins->sreg2;
4778 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Branch on the icall's integer result instead of fp condition flags */
4780 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4781 cmp->sreg1 = call->dreg;
4783 MONO_ADD_INS (cfg->cbb, cmp);
4785 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4786 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4787 br->inst_true_bb = ins->next->inst_true_bb;
4788 br->inst_false_bb = ins->next->inst_false_bb;
4789 MONO_ADD_INS (cfg->cbb, br);
4791 /* The call sequence might include fp ins */
4794 /* Skip fbcc or fccc */
4795 NULLIFY_INS (ins->next);
4803 MonoJitICallInfo *info;
4804 MonoInst *iargs [2];
4807 /* Convert fccc to icall+icompare+iceq */
4809 info = mono_find_jit_opcode_emulation (ins->opcode);
4812 /* Create dummy MonoInst's for the arguments */
4813 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4814 iargs [0]->dreg = ins->sreg1;
4815 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4816 iargs [1]->dreg = ins->sreg2;
4818 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Materialize the boolean: compare the icall result against 1 */
4820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4821 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4823 /* The call sequence might include fp ins */
/* Sanity check: after this pass no instruction may still touch fp vregs */
4828 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4829 mono_print_ins (ins);
4830 g_assert_not_reached ();
4835 g_assert (cfg->cbb == first_bb);
4837 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4838 /* Replace the original instruction with the new code sequence */
4840 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction */
4841 first_bb->code = first_bb->last_ins = NULL;
4842 first_bb->in_count = first_bb->out_count = 0;
4843 cfg->cbb = first_bb;
4850 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass emitted long opcodes, so run the long decomposition now */
4853 mono_decompose_long_opts (cfg);
4859 * mono_method_to_ir: translates IL into basic blocks containing trees
4862 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4863 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4864 guint inline_offset, gboolean is_virtual_call)
4866 MonoInst *ins, **sp, **stack_start;
4867 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4868 MonoMethod *cmethod, *method_definition;
4869 MonoInst **arg_array;
4870 MonoMethodHeader *header;
4872 guint32 token, ins_flag;
4874 MonoClass *constrained_call = NULL;
4875 unsigned char *ip, *end, *target, *err_pos;
4876 static double r8_0 = 0.0;
4877 MonoMethodSignature *sig;
4878 MonoGenericContext *generic_context = NULL;
4879 MonoGenericContainer *generic_container = NULL;
4880 MonoType **param_types;
4881 GList *bb_recheck = NULL, *tmp;
4882 int i, n, start_new_bblock, dreg;
4883 int num_calls = 0, inline_costs = 0;
4884 int breakpoint_id = 0;
4886 MonoBoolean security, pinvoke;
4887 MonoSecurityManager* secman = NULL;
4888 MonoDeclSecurityActions actions;
4889 GSList *class_inits = NULL;
4890 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4893 /* serialization and xdomain stuff may need access to private fields and methods */
4894 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4895 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4896 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4897 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4898 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4899 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4901 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4903 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4904 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4905 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4906 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4908 image = method->klass->image;
4909 header = mono_method_get_header (method);
4910 generic_container = mono_method_get_generic_container (method);
4911 sig = mono_method_signature (method);
4912 num_args = sig->hasthis + sig->param_count;
4913 ip = (unsigned char*)header->code;
4914 cfg->cil_start = ip;
4915 end = ip + header->code_size;
4916 mono_jit_stats.cil_code_size += header->code_size;
4918 method_definition = method;
4919 while (method_definition->is_inflated) {
4920 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4921 method_definition = imethod->declaring;
4924 /* SkipVerification is not allowed if core-clr is enabled */
4925 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4927 dont_verify_stloc = TRUE;
4930 if (!dont_verify && mini_method_verify (cfg, method_definition))
4931 goto exception_exit;
4933 if (sig->is_inflated)
4934 generic_context = mono_method_get_context (method);
4935 else if (generic_container)
4936 generic_context = &generic_container->context;
4937 cfg->generic_context = generic_context;
4939 if (!cfg->generic_sharing_context)
4940 g_assert (!sig->has_type_parameters);
4942 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4943 g_assert (method->is_inflated);
4944 g_assert (mono_method_get_context (method)->method_inst);
4946 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4947 g_assert (sig->generic_param_count);
4949 if (cfg->method == method) {
4950 cfg->real_offset = 0;
4952 cfg->real_offset = inline_offset;
4955 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4956 cfg->cil_offset_to_bb_len = header->code_size;
4958 cfg->current_method = method;
4960 if (cfg->verbose_level > 2)
4961 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4963 dont_inline = g_list_prepend (dont_inline, method);
4964 if (cfg->method == method) {
4966 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4967 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4970 NEW_BBLOCK (cfg, start_bblock);
4971 cfg->bb_entry = start_bblock;
4972 start_bblock->cil_code = NULL;
4973 start_bblock->cil_length = 0;
4976 NEW_BBLOCK (cfg, end_bblock);
4977 cfg->bb_exit = end_bblock;
4978 end_bblock->cil_code = NULL;
4979 end_bblock->cil_length = 0;
4980 g_assert (cfg->num_bblocks == 2);
4982 arg_array = alloca (sizeof (MonoInst *) * num_args);
4983 for (i = num_args - 1; i >= 0; i--)
4984 arg_array [i] = cfg->args [i];
4986 if (header->num_clauses) {
4987 cfg->spvars = g_hash_table_new (NULL, NULL);
4988 cfg->exvars = g_hash_table_new (NULL, NULL);
4990 /* handle exception clauses */
4991 for (i = 0; i < header->num_clauses; ++i) {
4992 MonoBasicBlock *try_bb;
4993 MonoExceptionClause *clause = &header->clauses [i];
4994 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4995 try_bb->real_offset = clause->try_offset;
4996 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4997 tblock->real_offset = clause->handler_offset;
4998 tblock->flags |= BB_EXCEPTION_HANDLER;
5000 link_bblock (cfg, try_bb, tblock);
5002 if (*(ip + clause->handler_offset) == CEE_POP)
5003 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5005 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5006 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5007 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5008 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5009 MONO_ADD_INS (tblock, ins);
5011 /* todo: is a fault block unsafe to optimize? */
5012 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5013 tblock->flags |= BB_EXCEPTION_UNSAFE;
5017 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5019 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5021 /* catch and filter blocks get the exception object on the stack */
5022 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5023 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5024 MonoInst *dummy_use;
5026 /* mostly like handle_stack_args (), but just sets the input args */
5027 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5028 tblock->in_scount = 1;
5029 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5030 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5033 * Add a dummy use for the exvar so its liveness info will be
5037 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5039 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5040 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5041 tblock->real_offset = clause->data.filter_offset;
5042 tblock->in_scount = 1;
5043 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5044 /* The filter block shares the exvar with the handler block */
5045 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5046 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5047 MONO_ADD_INS (tblock, ins);
5051 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5052 clause->data.catch_class &&
5053 cfg->generic_sharing_context &&
5054 mono_class_check_context_used (clause->data.catch_class)) {
5055 if (mono_method_get_context (method)->method_inst)
5056 GENERIC_SHARING_FAILURE (CEE_NOP);
5059 * In shared generic code with catch
5060 * clauses containing type variables
5061 * the exception handling code has to
5062 * be able to get to the rgctx.
5063 * Therefore we have to make sure that
5064 * the vtable/mrgctx argument (for
5065 * static or generic methods) or the
5066 * "this" argument (for non-static
5067 * methods) are live.
5069 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5070 mini_method_get_context (method)->method_inst) {
5071 mono_get_vtable_var (cfg);
5073 MonoInst *dummy_use;
5075 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5080 arg_array = alloca (sizeof (MonoInst *) * num_args);
5081 cfg->cbb = start_bblock;
5082 mono_save_args (cfg, sig, inline_args, arg_array);
5085 /* FIRST CODE BLOCK */
5086 NEW_BBLOCK (cfg, bblock);
5087 bblock->cil_code = ip;
5091 ADD_BBLOCK (cfg, bblock);
5093 if (cfg->method == method) {
5094 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5095 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5096 MONO_INST_NEW (cfg, ins, OP_BREAK);
5097 MONO_ADD_INS (bblock, ins);
5101 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5102 secman = mono_security_manager_get_methods ();
5104 security = (secman && mono_method_has_declsec (method));
5105 /* at this point having security doesn't mean we have any code to generate */
5106 if (security && (cfg->method == method)) {
5107 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5108 * And we do not want to enter the next section (with allocation) if we
5109 * have nothing to generate */
5110 security = mono_declsec_get_demands (method, &actions);
5113 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5114 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5116 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5117 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5118 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5120 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5121 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5125 mono_custom_attrs_free (custom);
5128 custom = mono_custom_attrs_from_class (wrapped->klass);
5129 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5133 mono_custom_attrs_free (custom);
5136 /* not a P/Invoke after all */
5141 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5142 /* we use a separate basic block for the initialization code */
5143 NEW_BBLOCK (cfg, init_localsbb);
5144 cfg->bb_init = init_localsbb;
5145 init_localsbb->real_offset = cfg->real_offset;
5146 start_bblock->next_bb = init_localsbb;
5147 init_localsbb->next_bb = bblock;
5148 link_bblock (cfg, start_bblock, init_localsbb);
5149 link_bblock (cfg, init_localsbb, bblock);
5151 cfg->cbb = init_localsbb;
5153 start_bblock->next_bb = bblock;
5154 link_bblock (cfg, start_bblock, bblock);
5157 /* at this point we know, if security is TRUE, that some code needs to be generated */
5158 if (security && (cfg->method == method)) {
5161 mono_jit_stats.cas_demand_generation++;
5163 if (actions.demand.blob) {
5164 /* Add code for SecurityAction.Demand */
5165 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5166 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5167 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5168 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5170 if (actions.noncasdemand.blob) {
5171 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5172 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5173 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5174 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5175 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5176 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5178 if (actions.demandchoice.blob) {
5179 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5180 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5181 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5182 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5183 mono_emit_method_call (cfg, secman->demandchoice, mono_method_signature (secman->demandchoice), args, NULL);
5187 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5189 mono_emit_method_call (cfg, secman->demandunmanaged, mono_method_signature (secman->demandunmanaged), NULL, NULL);
5192 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5193 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5194 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5195 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5196 if (!(method->klass && method->klass->image &&
5197 mono_security_core_clr_is_platform_image (method->klass->image))) {
5198 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5202 if (!method_is_safe (method))
5203 emit_throw_verification_exception (cfg, bblock, ip);
5206 if (header->code_size == 0)
5209 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5214 if (cfg->method == method)
5215 mono_debug_init_method (cfg, bblock, breakpoint_id);
5217 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5219 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5220 for (n = 0; n < sig->param_count; ++n)
5221 param_types [n + sig->hasthis] = sig->params [n];
5222 for (n = 0; n < header->num_locals; ++n) {
5223 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5228 /* add a check for this != NULL to inlined methods */
5229 if (is_virtual_call) {
5232 NEW_ARGLOAD (cfg, arg_ins, 0);
5233 MONO_ADD_INS (cfg->cbb, arg_ins);
5234 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5235 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5236 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5239 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5240 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5243 start_new_bblock = 0;
5247 if (cfg->method == method)
5248 cfg->real_offset = ip - header->code;
5250 cfg->real_offset = inline_offset;
5255 if (start_new_bblock) {
5256 bblock->cil_length = ip - bblock->cil_code;
5257 if (start_new_bblock == 2) {
5258 g_assert (ip == tblock->cil_code);
5260 GET_BBLOCK (cfg, tblock, ip);
5262 bblock->next_bb = tblock;
5265 start_new_bblock = 0;
5266 for (i = 0; i < bblock->in_scount; ++i) {
5267 if (cfg->verbose_level > 3)
5268 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5269 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5273 g_slist_free (class_inits);
5276 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5277 link_bblock (cfg, bblock, tblock);
5278 if (sp != stack_start) {
5279 handle_stack_args (cfg, stack_start, sp - stack_start);
5281 CHECK_UNVERIFIABLE (cfg);
5283 bblock->next_bb = tblock;
5286 for (i = 0; i < bblock->in_scount; ++i) {
5287 if (cfg->verbose_level > 3)
5288 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5289 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5292 g_slist_free (class_inits);
5297 bblock->real_offset = cfg->real_offset;
5299 if ((cfg->method == method) && cfg->coverage_info) {
5300 guint32 cil_offset = ip - header->code;
5301 cfg->coverage_info->data [cil_offset].cil_code = ip;
5303 /* TODO: Use an increment here */
5304 #if defined(__i386__)
5305 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5306 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5308 MONO_ADD_INS (cfg->cbb, ins);
5310 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5311 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5315 if (cfg->verbose_level > 3)
5316 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5321 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5323 MONO_ADD_INS (bblock, ins);
5329 CHECK_STACK_OVF (1);
5330 n = (*ip)-CEE_LDARG_0;
5332 EMIT_NEW_ARGLOAD (cfg, ins, n);
5340 CHECK_STACK_OVF (1);
5341 n = (*ip)-CEE_LDLOC_0;
5343 EMIT_NEW_LOCLOAD (cfg, ins, n);
5354 n = (*ip)-CEE_STLOC_0;
5357 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5360 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5361 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5362 /* Optimize reg-reg moves away */
5364 * Can't optimize other opcodes, since sp[0] might point to
5365 * the last ins of a decomposed opcode.
5367 sp [0]->dreg = (cfg)->locals [n]->dreg;
5369 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5377 CHECK_STACK_OVF (1);
5380 EMIT_NEW_ARGLOAD (cfg, ins, n);
5386 CHECK_STACK_OVF (1);
5389 NEW_ARGLOADA (cfg, ins, n);
5390 MONO_ADD_INS (cfg->cbb, ins);
5400 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5402 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5407 CHECK_STACK_OVF (1);
5410 EMIT_NEW_LOCLOAD (cfg, ins, n);
5416 CHECK_STACK_OVF (1);
5417 CHECK_LOCAL (ip [1]);
5420 * ldloca inhibits many optimizations so try to get rid of it in common
5423 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5424 gboolean skip = FALSE;
5426 /* From the INITOBJ case */
5427 token = read32 (ip + 4);
5428 klass = mini_get_class (method, token, generic_context);
5429 CHECK_TYPELOAD (klass);
5430 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
5431 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
5433 if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5434 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5435 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5436 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5448 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5456 CHECK_LOCAL (ip [1]);
5457 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5459 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5464 CHECK_STACK_OVF (1);
5465 EMIT_NEW_PCONST (cfg, ins, NULL);
5466 ins->type = STACK_OBJ;
5471 CHECK_STACK_OVF (1);
5472 EMIT_NEW_ICONST (cfg, ins, -1);
5485 CHECK_STACK_OVF (1);
5486 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5492 CHECK_STACK_OVF (1);
5494 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5500 CHECK_STACK_OVF (1);
5501 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5507 CHECK_STACK_OVF (1);
5508 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5509 ins->type = STACK_I8;
5510 ins->dreg = alloc_dreg (cfg, STACK_I8);
5512 ins->inst_l = (gint64)read64 (ip);
5513 MONO_ADD_INS (bblock, ins);
5519 /* FIXME: we should really allocate this only late in the compilation process */
5520 mono_domain_lock (cfg->domain);
5521 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5522 mono_domain_unlock (cfg->domain);
5524 CHECK_STACK_OVF (1);
5525 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5526 ins->type = STACK_R8;
5527 ins->dreg = alloc_dreg (cfg, STACK_R8);
5531 MONO_ADD_INS (bblock, ins);
5539 /* FIXME: we should really allocate this only late in the compilation process */
5540 mono_domain_lock (cfg->domain);
5541 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5542 mono_domain_unlock (cfg->domain);
5544 CHECK_STACK_OVF (1);
5545 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5546 ins->type = STACK_R8;
5547 ins->dreg = alloc_dreg (cfg, STACK_R8);
5551 MONO_ADD_INS (bblock, ins);
5558 MonoInst *temp, *store;
5560 CHECK_STACK_OVF (1);
5564 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5565 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5567 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5570 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5583 if (sp [0]->type == STACK_R8)
5584 /* we need to pop the value from the x86 FP stack */
5585 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5592 if (stack_start != sp)
5594 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5595 ins = (MonoInst*)call;
5596 token = read32 (ip + 1);
5597 /* FIXME: check the signature matches */
5598 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5603 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5604 GENERIC_SHARING_FAILURE (CEE_JMP);
5606 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5607 if (check_linkdemand (cfg, method, cmethod))
5609 CHECK_CFG_EXCEPTION;
5612 ins->inst_p0 = cmethod;
5613 MONO_ADD_INS (bblock, ins);
5615 start_new_bblock = 1;
5618 cfg->disable_aot = 1;
5623 case CEE_CALLVIRT: {
5624 MonoInst *addr = NULL;
5625 MonoMethodSignature *fsig = NULL;
5627 int virtual = *ip == CEE_CALLVIRT;
5628 int calli = *ip == CEE_CALLI;
5629 gboolean pass_imt_from_rgctx = FALSE;
5630 MonoInst *imt_arg = NULL;
5631 gboolean pass_vtable = FALSE;
5632 gboolean pass_mrgctx = FALSE;
5633 MonoInst *vtable_arg = NULL;
5634 gboolean check_this = FALSE;
5637 token = read32 (ip + 1);
5644 if (method->wrapper_type != MONO_WRAPPER_NONE)
5645 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5647 fsig = mono_metadata_parse_signature (image, token);
5649 n = fsig->param_count + fsig->hasthis;
5651 MonoMethod *cil_method;
5653 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5654 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5655 cil_method = cmethod;
5656 } else if (constrained_call) {
5657 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5659 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5660 cil_method = cmethod;
5665 if (!dont_verify && !cfg->skip_visibility) {
5666 MonoMethod *target_method = cil_method;
5667 if (method->is_inflated) {
5668 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5670 if (!mono_method_can_access_method (method_definition, target_method) &&
5671 !mono_method_can_access_method (method, cil_method))
5672 METHOD_ACCESS_FAILURE;
5675 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5676 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5678 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5679 /* MS.NET seems to silently convert this to a callvirt */
5682 if (!cmethod->klass->inited)
5683 if (!mono_class_init (cmethod->klass))
5686 if (mono_method_signature (cmethod)->pinvoke) {
5687 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
5688 fsig = mono_method_signature (wrapper);
5689 } else if (constrained_call) {
5690 fsig = mono_method_signature (cmethod);
5692 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5695 mono_save_token_info (cfg, image, token, cmethod);
5697 n = fsig->param_count + fsig->hasthis;
5699 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5700 if (check_linkdemand (cfg, method, cmethod))
5702 CHECK_CFG_EXCEPTION;
5705 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5706 mini_class_is_system_array (cmethod->klass)) {
5707 array_rank = cmethod->klass->rank;
5710 if (cmethod->string_ctor)
5711 g_assert_not_reached ();
5714 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5717 if (!cfg->generic_sharing_context && cmethod)
5718 g_assert (!mono_method_check_context_used (cmethod));
5722 //g_assert (!virtual || fsig->hasthis);
5726 if (constrained_call) {
5728 * We have the `constrained.' prefix opcode.
5730 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5734 * The type parameter is instantiated as a valuetype,
5735 * but that type doesn't override the method we're
5736 * calling, so we need to box `this'.
5738 dreg = alloc_dreg (cfg, STACK_VTYPE);
5739 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5740 ins->klass = constrained_call;
5741 sp [0] = handle_box (cfg, ins, constrained_call);
5742 } else if (!constrained_call->valuetype) {
5743 int dreg = alloc_preg (cfg);
5746 * The type parameter is instantiated as a reference
5747 * type. We have a managed pointer on the stack, so
5748 * we need to dereference it here.
5750 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5751 ins->type = STACK_OBJ;
5753 } else if (cmethod->klass->valuetype)
5755 constrained_call = NULL;
5758 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5762 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5763 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5764 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5765 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5766 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5769 * Pass vtable iff target method might
5770 * be shared, which means that sharing
5771 * is enabled for its class and its
5772 * context is sharable (and it's not a
5775 if (sharing_enabled && context_sharable &&
5776 !mini_method_get_context (cmethod)->method_inst)
5780 if (cmethod && mini_method_get_context (cmethod) &&
5781 mini_method_get_context (cmethod)->method_inst) {
5782 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5783 MonoGenericContext *context = mini_method_get_context (cmethod);
5784 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5786 g_assert (!pass_vtable);
5788 if (sharing_enabled && context_sharable)
5792 if (cfg->generic_sharing_context && cmethod) {
5793 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5795 context_used = mono_method_check_context_used (cmethod);
5797 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5798 /* Generic method interface
5799 calls are resolved via a
5800 helper function and don't
5802 if (!cmethod_context || !cmethod_context->method_inst)
5803 pass_imt_from_rgctx = TRUE;
5807 * If a shared method calls another
5808 * shared method then the caller must
5809 * have a generic sharing context
5810 * because the magic trampoline
5811 * requires it. FIXME: We shouldn't
5812 * have to force the vtable/mrgctx
5813 * variable here. Instead there
5814 * should be a flag in the cfg to
5815 * request a generic sharing context.
5817 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5818 mono_get_vtable_var (cfg);
5823 GENERIC_SHARING_FAILURE (*ip);
5829 EMIT_GET_RGCTX (rgctx, context_used);
5830 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5832 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5834 CHECK_TYPELOAD (cmethod->klass);
5835 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5840 g_assert (!vtable_arg);
5845 EMIT_GET_RGCTX (rgctx, context_used);
5846 vtable_arg = emit_get_rgctx_method_rgctx (cfg, context_used, rgctx, cmethod);
5848 MonoMethodRuntimeGenericContext *mrgctx;
5850 mrgctx = mono_method_lookup_rgctx (mono_class_vtable (cfg->domain, cmethod->klass),
5851 mini_method_get_context (cmethod)->method_inst);
5853 EMIT_NEW_PCONST (cfg, vtable_arg, mrgctx);
5856 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5857 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5864 if (pass_imt_from_rgctx) {
5867 g_assert (!pass_vtable);
5870 EMIT_GET_RGCTX (rgctx, context_used);
5871 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5872 MONO_RGCTX_INFO_METHOD);
5878 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5879 check->sreg1 = sp [0]->dreg;
5880 MONO_ADD_INS (cfg->cbb, check);
5883 /* Calling virtual generic methods */
5884 if (cmethod && virtual &&
5885 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5886 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5887 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5888 mono_method_signature (cmethod)->generic_param_count) {
5889 MonoInst *this_temp, *this_arg_temp, *store;
5890 MonoInst *iargs [4];
5892 g_assert (mono_method_signature (cmethod)->is_inflated);
5894 /* Prevent inlining of methods that contain indirect calls */
5897 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5898 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5899 MONO_ADD_INS (bblock, store);
5901 /* FIXME: This should be a managed pointer */
5902 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5904 /* Because of the PCONST below */
5905 cfg->disable_aot = TRUE;
5906 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5910 EMIT_GET_RGCTX (rgctx, context_used);
5911 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5912 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5913 addr = mono_emit_jit_icall (cfg,
5914 mono_helper_compile_generic_method_wo_context, iargs);
5916 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5917 EMIT_NEW_PCONST (cfg, iargs [2], mono_method_get_context (cmethod));
5918 EMIT_NEW_TEMPLOADA (cfg, iargs [3], this_arg_temp->inst_c0);
5919 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5922 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5924 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5925 if (!MONO_TYPE_IS_VOID (fsig->ret))
5934 /* FIXME: runtime generic context pointer for jumps? */
5935 /* FIXME: handle this for generic sharing eventually */
5936 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5937 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5940 /* FIXME: runtime generic context pointer for jumps? */
5941 GENERIC_SHARING_FAILURE (*ip);
5943 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5946 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5947 call->tail_call = TRUE;
5948 call->method = cmethod;
5949 call->signature = mono_method_signature (cmethod);
5952 /* Handle tail calls similarly to calls */
5953 call->inst.opcode = OP_TAILCALL;
5955 mono_arch_emit_call (cfg, call);
5958 * We implement tail calls by storing the actual arguments into the
5959 * argument variables, then emitting a CEE_JMP.
5961 for (i = 0; i < n; ++i) {
5962 /* Prevent argument from being register allocated */
5963 arg_array [i]->flags |= MONO_INST_VOLATILE;
5964 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5969 cfg->disable_aot = 1;
5971 ins = (MonoInst*)call;
5972 ins->inst_p0 = cmethod;
5973 ins->inst_p1 = arg_array [0];
5974 MONO_ADD_INS (bblock, ins);
5975 link_bblock (cfg, bblock, end_bblock);
5976 start_new_bblock = 1;
5977 /* skip CEE_RET as well */
5983 /* Conversion to a JIT intrinsic */
5984 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5985 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5986 type_to_eval_stack_type ((cfg), fsig->ret, ins);
5997 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
5998 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
5999 mono_method_check_inlining (cfg, cmethod) &&
6000 !g_list_find (dont_inline, cmethod)) {
6002 gboolean allways = FALSE;
6004 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6005 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6006 /* Prevent inlining of methods that call wrappers */
6008 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
6012 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6014 cfg->real_offset += 5;
6017 if (!MONO_TYPE_IS_VOID (fsig->ret))
6018 /* *sp is already set by inline_method */
6021 inline_costs += costs;
6027 inline_costs += 10 * num_calls++;
6029 /* Tail recursion elimination */
6030 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6031 gboolean has_vtargs = FALSE;
6034 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6037 /* keep it simple */
6038 for (i = fsig->param_count - 1; i >= 0; i--) {
6039 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6044 for (i = 0; i < n; ++i)
6045 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6046 MONO_INST_NEW (cfg, ins, OP_BR);
6047 MONO_ADD_INS (bblock, ins);
6048 tblock = start_bblock->out_bb [0];
6049 link_bblock (cfg, bblock, tblock);
6050 ins->inst_target_bb = tblock;
6051 start_new_bblock = 1;
6053 /* skip the CEE_RET, too */
6054 if (ip_in_bb (cfg, bblock, ip + 5))
6064 /* Generic sharing */
6065 /* FIXME: only do this for generic methods if
6066 they are not shared! */
6068 (cmethod->klass->valuetype ||
6069 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6070 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6071 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6072 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6073 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6074 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6079 g_assert (cfg->generic_sharing_context && cmethod);
6083 * We are compiling a call to a
6084 * generic method from shared code,
6085 * which means that we have to look up
6086 * the method in the rgctx and do an
6090 EMIT_GET_RGCTX (rgctx, context_used);
6091 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6094 /* Indirect calls */
6096 g_assert (!imt_arg);
6098 if (*ip == CEE_CALL)
6099 g_assert (context_used);
6100 else if (*ip == CEE_CALLI)
6101 g_assert (!vtable_arg);
6103 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6104 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6106 /* Prevent inlining of methods with indirect calls */
6110 #ifdef MONO_ARCH_RGCTX_REG
6112 int rgctx_reg = mono_alloc_preg (cfg);
6114 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6115 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6116 call = (MonoCallInst*)ins;
6117 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6119 GENERIC_SHARING_FAILURE (*ip);
6122 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6124 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6125 if (fsig->pinvoke && !fsig->ret->byref) {
6129 * Native code might return non register sized integers
6130 * without initializing the upper bits.
6132 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6133 case OP_LOADI1_MEMBASE:
6134 widen_op = OP_ICONV_TO_I1;
6136 case OP_LOADU1_MEMBASE:
6137 widen_op = OP_ICONV_TO_U1;
6139 case OP_LOADI2_MEMBASE:
6140 widen_op = OP_ICONV_TO_I2;
6142 case OP_LOADU2_MEMBASE:
6143 widen_op = OP_ICONV_TO_U2;
6149 if (widen_op != -1) {
6150 int dreg = alloc_preg (cfg);
6153 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6154 widen->type = ins->type;
6171 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6172 if (sp [fsig->param_count]->type == STACK_OBJ) {
6173 MonoInst *iargs [2];
6176 iargs [1] = sp [fsig->param_count];
6178 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6181 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6182 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6183 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6184 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6186 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6189 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6190 if (!cmethod->klass->element_class->valuetype && !readonly)
6191 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6194 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6197 g_assert_not_reached ();
6205 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6207 if (!MONO_TYPE_IS_VOID (fsig->ret))
6218 #ifdef MONO_ARCH_RGCTX_REG
6220 int rgctx_reg = mono_alloc_preg (cfg);
6222 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6223 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6224 call = (MonoCallInst*)ins;
6225 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6227 GENERIC_SHARING_FAILURE (*ip);
6229 } else if (imt_arg) {
6230 ins = (MonoInst*)mono_emit_imt_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6232 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6235 if (!MONO_TYPE_IS_VOID (fsig->ret))
6243 if (cfg->method != method) {
6244 /* return from inlined method */
6249 //g_assert (returnvar != -1);
6250 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6251 cfg->ret_var_set = TRUE;
6255 MonoType *ret_type = mono_method_signature (method)->ret;
6257 g_assert (!return_var);
6260 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6263 if (!cfg->vret_addr) {
6266 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6268 EMIT_NEW_RETLOADA (cfg, ret_addr);
6270 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6271 ins->klass = mono_class_from_mono_type (ret_type);
6274 #ifdef MONO_ARCH_SOFT_FLOAT
6275 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6276 MonoInst *iargs [1];
6280 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6281 mono_arch_emit_setret (cfg, method, conv);
6283 mono_arch_emit_setret (cfg, method, *sp);
6286 mono_arch_emit_setret (cfg, method, *sp);
6291 if (sp != stack_start)
6293 MONO_INST_NEW (cfg, ins, OP_BR);
6295 ins->inst_target_bb = end_bblock;
6296 MONO_ADD_INS (bblock, ins);
6297 link_bblock (cfg, bblock, end_bblock);
6298 start_new_bblock = 1;
6302 MONO_INST_NEW (cfg, ins, OP_BR);
6304 target = ip + 1 + (signed char)(*ip);
6306 GET_BBLOCK (cfg, tblock, target);
6307 link_bblock (cfg, bblock, tblock);
6308 CHECK_BBLOCK (target, ip, tblock);
6309 ins->inst_target_bb = tblock;
6310 if (sp != stack_start) {
6311 handle_stack_args (cfg, stack_start, sp - stack_start);
6313 CHECK_UNVERIFIABLE (cfg);
6315 MONO_ADD_INS (bblock, ins);
6316 start_new_bblock = 1;
6317 inline_costs += BRANCH_COST;
6331 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6333 target = ip + 1 + *(signed char*)ip;
6339 inline_costs += BRANCH_COST;
6343 MONO_INST_NEW (cfg, ins, OP_BR);
6346 target = ip + 4 + (gint32)read32(ip);
6348 GET_BBLOCK (cfg, tblock, target);
6349 link_bblock (cfg, bblock, tblock);
6350 CHECK_BBLOCK (target, ip, tblock);
6351 ins->inst_target_bb = tblock;
6352 if (sp != stack_start) {
6353 handle_stack_args (cfg, stack_start, sp - stack_start);
6355 CHECK_UNVERIFIABLE (cfg);
6358 MONO_ADD_INS (bblock, ins);
6360 start_new_bblock = 1;
6361 inline_costs += BRANCH_COST;
6368 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6369 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6370 guint32 opsize = is_short ? 1 : 4;
6372 CHECK_OPSIZE (opsize);
6374 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6377 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6382 GET_BBLOCK (cfg, tblock, target);
6383 link_bblock (cfg, bblock, tblock);
6384 CHECK_BBLOCK (target, ip, tblock);
6385 GET_BBLOCK (cfg, tblock, ip);
6386 link_bblock (cfg, bblock, tblock);
6388 if (sp != stack_start) {
6389 handle_stack_args (cfg, stack_start, sp - stack_start);
6390 CHECK_UNVERIFIABLE (cfg);
6393 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6394 cmp->sreg1 = sp [0]->dreg;
6395 type_from_op (cmp, sp [0], NULL);
6398 #if SIZEOF_VOID_P == 4
6399 if (cmp->opcode == OP_LCOMPARE_IMM) {
6400 /* Convert it to OP_LCOMPARE */
6401 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6402 ins->type = STACK_I8;
6403 ins->dreg = alloc_dreg (cfg, STACK_I8);
6405 MONO_ADD_INS (bblock, ins);
6406 cmp->opcode = OP_LCOMPARE;
6407 cmp->sreg2 = ins->dreg;
6410 MONO_ADD_INS (bblock, cmp);
6412 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6413 type_from_op (ins, sp [0], NULL);
6414 MONO_ADD_INS (bblock, ins);
6415 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6416 GET_BBLOCK (cfg, tblock, target);
6417 ins->inst_true_bb = tblock;
6418 GET_BBLOCK (cfg, tblock, ip);
6419 ins->inst_false_bb = tblock;
6420 start_new_bblock = 2;
6423 inline_costs += BRANCH_COST;
6438 MONO_INST_NEW (cfg, ins, *ip);
6440 target = ip + 4 + (gint32)read32(ip);
6446 inline_costs += BRANCH_COST;
6450 MonoBasicBlock **targets;
6451 MonoBasicBlock *default_bblock;
6452 MonoJumpInfoBBTable *table;
6453 int offset_reg = alloc_preg (cfg);
6454 int target_reg = alloc_preg (cfg);
6455 int table_reg = alloc_preg (cfg);
6456 int sum_reg = alloc_preg (cfg);
6460 n = read32 (ip + 1);
6463 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6467 CHECK_OPSIZE (n * sizeof (guint32));
6468 target = ip + n * sizeof (guint32);
6470 GET_BBLOCK (cfg, default_bblock, target);
6472 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6473 for (i = 0; i < n; ++i) {
6474 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6475 targets [i] = tblock;
6479 if (sp != stack_start) {
6481 * Link the current bb with the targets as well, so handle_stack_args
6482 * will set their in_stack correctly.
6484 link_bblock (cfg, bblock, default_bblock);
6485 for (i = 0; i < n; ++i)
6486 link_bblock (cfg, bblock, targets [i]);
6488 handle_stack_args (cfg, stack_start, sp - stack_start);
6490 CHECK_UNVERIFIABLE (cfg);
6493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6497 for (i = 0; i < n; ++i)
6498 link_bblock (cfg, bblock, targets [i]);
6500 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6501 table->table = targets;
6502 table->table_size = n;
6505 /* ARM implements SWITCH statements differently */
6506 /* FIXME: Make it use the generic implementation */
6507 /* the backend code will deal with aot vs normal case */
6508 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6509 ins->sreg1 = src1->dreg;
6510 ins->inst_p0 = table;
6511 ins->inst_many_bb = targets;
6512 ins->klass = GUINT_TO_POINTER (n);
6513 MONO_ADD_INS (cfg->cbb, ins);
6515 if (sizeof (gpointer) == 8)
6516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6520 #if SIZEOF_VOID_P == 8
6521 /* The upper word might not be zero, and we add it to a 64 bit address later */
6522 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6525 if (cfg->compile_aot) {
6526 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6528 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6529 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6530 ins->inst_p0 = table;
6531 ins->dreg = table_reg;
6532 MONO_ADD_INS (cfg->cbb, ins);
6535 /* FIXME: Use load_memindex */
6536 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6538 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6540 start_new_bblock = 1;
6541 inline_costs += (BRANCH_COST * 2);
6561 dreg = alloc_freg (cfg);
6564 dreg = alloc_lreg (cfg);
6567 dreg = alloc_preg (cfg);
6570 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6571 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6572 ins->flags |= ins_flag;
6574 MONO_ADD_INS (bblock, ins);
6589 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6590 ins->flags |= ins_flag;
6592 MONO_ADD_INS (bblock, ins);
6600 MONO_INST_NEW (cfg, ins, (*ip));
6602 ins->sreg1 = sp [0]->dreg;
6603 ins->sreg2 = sp [1]->dreg;
6604 type_from_op (ins, sp [0], sp [1]);
6606 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6608 /* Use the immediate opcodes if possible */
6609 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6610 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6611 if (imm_opcode != -1) {
6612 ins->opcode = imm_opcode;
6613 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6616 sp [1]->opcode = OP_NOP;
6620 MONO_ADD_INS ((cfg)->cbb, (ins));
6623 mono_decompose_opcode (cfg, ins);
6640 MONO_INST_NEW (cfg, ins, (*ip));
6642 ins->sreg1 = sp [0]->dreg;
6643 ins->sreg2 = sp [1]->dreg;
6644 type_from_op (ins, sp [0], sp [1]);
6646 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6647 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6649 /* FIXME: Pass opcode to is_inst_imm */
6651 /* Use the immediate opcodes if possible */
6652 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6655 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6656 if (imm_opcode != -1) {
6657 ins->opcode = imm_opcode;
6658 if (sp [1]->opcode == OP_I8CONST) {
6659 #if SIZEOF_VOID_P == 8
6660 ins->inst_imm = sp [1]->inst_l;
6662 ins->inst_ls_word = sp [1]->inst_ls_word;
6663 ins->inst_ms_word = sp [1]->inst_ms_word;
6667 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6670 sp [1]->opcode = OP_NOP;
6673 MONO_ADD_INS ((cfg)->cbb, (ins));
6676 mono_decompose_opcode (cfg, ins);
6689 case CEE_CONV_OVF_I8:
6690 case CEE_CONV_OVF_U8:
6694 /* Special case this earlier so we have long constants in the IR */
6695 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6696 int data = sp [-1]->inst_c0;
6697 sp [-1]->opcode = OP_I8CONST;
6698 sp [-1]->type = STACK_I8;
6699 #if SIZEOF_VOID_P == 8
6700 if ((*ip) == CEE_CONV_U8)
6701 sp [-1]->inst_c0 = (guint32)data;
6703 sp [-1]->inst_c0 = data;
6705 sp [-1]->inst_ls_word = data;
6706 if ((*ip) == CEE_CONV_U8)
6707 sp [-1]->inst_ms_word = 0;
6709 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6711 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6718 case CEE_CONV_OVF_I4:
6719 case CEE_CONV_OVF_I1:
6720 case CEE_CONV_OVF_I2:
6721 case CEE_CONV_OVF_I:
6722 case CEE_CONV_OVF_U:
6725 if (sp [-1]->type == STACK_R8) {
6726 ADD_UNOP (CEE_CONV_OVF_I8);
6733 case CEE_CONV_OVF_U1:
6734 case CEE_CONV_OVF_U2:
6735 case CEE_CONV_OVF_U4:
6738 if (sp [-1]->type == STACK_R8) {
6739 ADD_UNOP (CEE_CONV_OVF_U8);
6746 case CEE_CONV_OVF_I1_UN:
6747 case CEE_CONV_OVF_I2_UN:
6748 case CEE_CONV_OVF_I4_UN:
6749 case CEE_CONV_OVF_I8_UN:
6750 case CEE_CONV_OVF_U1_UN:
6751 case CEE_CONV_OVF_U2_UN:
6752 case CEE_CONV_OVF_U4_UN:
6753 case CEE_CONV_OVF_U8_UN:
6754 case CEE_CONV_OVF_I_UN:
6755 case CEE_CONV_OVF_U_UN:
6765 case CEE_ADD_OVF_UN:
6767 case CEE_MUL_OVF_UN:
6769 case CEE_SUB_OVF_UN:
6777 token = read32 (ip + 1);
6778 klass = mini_get_class (method, token, generic_context);
6779 CHECK_TYPELOAD (klass);
6781 if (generic_class_is_reference_type (cfg, klass)) {
6782 MonoInst *store, *load;
6783 int dreg = alloc_preg (cfg);
6785 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6786 load->flags |= ins_flag;
6787 MONO_ADD_INS (cfg->cbb, load);
6789 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6790 store->flags |= ins_flag;
6791 MONO_ADD_INS (cfg->cbb, store);
6793 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6805 token = read32 (ip + 1);
6806 klass = mini_get_class (method, token, generic_context);
6807 CHECK_TYPELOAD (klass);
6809 /* Optimize the common ldobj+stloc combination */
6819 loc_index = ip [5] - CEE_STLOC_0;
6826 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6827 CHECK_LOCAL (loc_index);
6829 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6830 ins->dreg = cfg->locals [loc_index]->dreg;
6836 /* Optimize the ldobj+stobj combination */
6837 /* The reference case ends up being a load+store anyway */
6838 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6843 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6850 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6859 CHECK_STACK_OVF (1);
6861 n = read32 (ip + 1);
6863 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6864 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6865 ins->type = STACK_OBJ;
6868 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6869 MonoInst *iargs [1];
6871 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6872 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6874 if (cfg->opt & MONO_OPT_SHARED) {
6875 MonoInst *iargs [3];
6877 if (cfg->compile_aot) {
6878 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6880 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6881 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6882 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6883 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6884 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6886 if (bblock->out_of_line) {
6887 MonoInst *iargs [2];
6889 if (cfg->method->klass->image == mono_defaults.corlib) {
6891 * Avoid relocations in AOT and save some space by using a
6892 * version of helper_ldstr specialized to mscorlib.
6894 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6895 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6897 /* Avoid creating the string object */
6898 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6899 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6900 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6904 if (cfg->compile_aot) {
6905 NEW_LDSTRCONST (cfg, ins, image, n);
6907 MONO_ADD_INS (bblock, ins);
6910 NEW_PCONST (cfg, ins, NULL);
6911 ins->type = STACK_OBJ;
6912 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6914 MONO_ADD_INS (bblock, ins);
6923 MonoInst *iargs [2];
6924 MonoMethodSignature *fsig;
6929 token = read32 (ip + 1);
6930 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6933 fsig = mono_method_get_signature (cmethod, image, token);
6935 mono_save_token_info (cfg, image, token, cmethod);
6937 if (!mono_class_init (cmethod->klass))
6940 if (cfg->generic_sharing_context)
6941 context_used = mono_method_check_context_used (cmethod);
6943 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6944 if (check_linkdemand (cfg, method, cmethod))
6946 CHECK_CFG_EXCEPTION;
6947 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6948 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6951 n = fsig->param_count;
6955 * Generate smaller code for the common newobj <exception> instruction in
6956 * argument checking code.
6958 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6959 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6960 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6961 MonoInst *iargs [3];
6965 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6968 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6972 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6977 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6980 g_assert_not_reached ();
6988 /* move the args to allow room for 'this' in the first position */
6994 /* check_call_signature () requires sp[0] to be set */
6995 this_ins.type = STACK_OBJ;
6997 if (check_call_signature (cfg, fsig, sp))
7002 if (mini_class_is_system_array (cmethod->klass)) {
7003 g_assert (!context_used);
7004 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7005 if (fsig->param_count == 2)
7006 /* Avoid varargs in the common case */
7007 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7009 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7010 } else if (cmethod->string_ctor) {
7011 g_assert (!context_used);
7012 /* we simply pass a null pointer */
7013 EMIT_NEW_PCONST (cfg, *sp, NULL);
7014 /* now call the string ctor */
7015 alloc = mono_emit_method_call (cfg, cmethod, fsig, sp, NULL);
7017 MonoInst* callvirt_this_arg = NULL;
7019 if (cmethod->klass->valuetype) {
7020 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7021 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7022 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7027 * The code generated by mini_emit_virtual_call () expects
7028 * iargs [0] to be a boxed instance, but luckily the vcall
7029 * will be transformed into a normal call there.
7031 } else if (context_used) {
7032 MonoInst *rgctx, *data;
7035 EMIT_GET_RGCTX (rgctx, context_used);
7036 if (cfg->opt & MONO_OPT_SHARED)
7037 rgctx_info = MONO_RGCTX_INFO_KLASS;
7039 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7040 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7042 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7045 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7047 CHECK_TYPELOAD (cmethod->klass);
7050 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7051 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7052 * As a workaround, we call class cctors before allocating objects.
7054 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7055 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7056 mono_emit_native_call (cfg, tramp,
7057 helper_sig_class_init_trampoline,
7059 if (cfg->verbose_level > 2)
7060 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7061 class_inits = g_slist_prepend (class_inits, vtable);
7064 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7069 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7071 /* Now call the actual ctor */
7072 /* Avoid virtual calls to ctors if possible */
7073 if (cmethod->klass->marshalbyref)
7074 callvirt_this_arg = sp [0];
7076 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7077 mono_method_check_inlining (cfg, cmethod) &&
7078 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7079 !g_list_find (dont_inline, cmethod)) {
7082 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7083 cfg->real_offset += 5;
7086 inline_costs += costs - 5;
7089 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7091 } else if (context_used &&
7092 (cmethod->klass->valuetype ||
7093 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7094 MonoInst *rgctx, *cmethod_addr;
7096 g_assert (!callvirt_this_arg);
7098 EMIT_GET_RGCTX (rgctx, context_used);
7099 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7100 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7102 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7105 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7109 if (alloc == NULL) {
7111 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7112 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7126 token = read32 (ip + 1);
7127 klass = mini_get_class (method, token, generic_context);
7128 CHECK_TYPELOAD (klass);
7129 if (sp [0]->type != STACK_OBJ)
7132 if (cfg->generic_sharing_context)
7133 context_used = mono_class_check_context_used (klass);
7136 MonoInst *rgctx, *args [2];
7138 g_assert (!method->klass->valuetype);
7144 EMIT_GET_RGCTX (rgctx, context_used);
7145 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7146 MONO_RGCTX_INFO_KLASS);
7148 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7152 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7153 MonoMethod *mono_castclass;
7154 MonoInst *iargs [1];
7157 mono_castclass = mono_marshal_get_castclass (klass);
7160 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7161 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7162 g_assert (costs > 0);
7165 cfg->real_offset += 5;
7170 inline_costs += costs;
7173 ins = handle_castclass (cfg, klass, *sp);
7183 token = read32 (ip + 1);
7184 klass = mini_get_class (method, token, generic_context);
7185 CHECK_TYPELOAD (klass);
7186 if (sp [0]->type != STACK_OBJ)
7189 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
7190 GENERIC_SHARING_FAILURE (CEE_ISINST);
7192 if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7194 MonoMethod *mono_isinst;
7195 MonoInst *iargs [1];
7198 mono_isinst = mono_marshal_get_isinst (klass);
7201 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7202 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7203 g_assert (costs > 0);
7206 cfg->real_offset += 5;
7211 inline_costs += costs;
7214 ins = handle_isinst (cfg, klass, *sp);
7220 case CEE_UNBOX_ANY: {
7221 MonoInst *rgctx = NULL;
7226 token = read32 (ip + 1);
7227 klass = mini_get_class (method, token, generic_context);
7228 CHECK_TYPELOAD (klass);
7230 if (cfg->generic_sharing_context)
7231 context_used = mono_class_check_context_used (klass);
7233 if (generic_class_is_reference_type (cfg, klass)) {
7236 MonoInst *iargs [2];
7238 g_assert (!method->klass->valuetype);
7243 EMIT_GET_RGCTX (rgctx, context_used);
7244 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7245 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7249 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7250 MonoMethod *mono_castclass;
7251 MonoInst *iargs [1];
7254 mono_castclass = mono_marshal_get_castclass (klass);
7257 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7258 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7260 g_assert (costs > 0);
7263 cfg->real_offset += 5;
7267 inline_costs += costs;
7269 ins = handle_castclass (cfg, klass, *sp);
7278 EMIT_GET_RGCTX (rgctx, context_used);
7280 if (mono_class_is_nullable (klass)) {
7281 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7288 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7294 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7302 int context_used = 0;
7308 token = read32 (ip + 1);
7309 klass = mini_get_class (method, token, generic_context);
7310 CHECK_TYPELOAD (klass);
7312 if (cfg->generic_sharing_context) {
7313 context_used = mono_class_check_context_used (klass);
7315 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
7316 GENERIC_SHARING_FAILURE (*ip);
7319 if (generic_class_is_reference_type (cfg, klass)) {
7325 if (klass == mono_defaults.void_class)
7327 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7329 /* frequent check in generic code: box (struct), brtrue */
7330 if (!mono_class_is_nullable (klass) &&
7331 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7332 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7334 MONO_INST_NEW (cfg, ins, OP_BR);
7335 if (*ip == CEE_BRTRUE_S) {
7338 target = ip + 1 + (signed char)(*ip);
7343 target = ip + 4 + (gint)(read32 (ip));
7346 GET_BBLOCK (cfg, tblock, target);
7347 link_bblock (cfg, bblock, tblock);
7348 CHECK_BBLOCK (target, ip, tblock);
7349 ins->inst_target_bb = tblock;
7350 GET_BBLOCK (cfg, tblock, ip);
7352 * This leads to some inconsistency, since the two bblocks are not
7353 * really connected, but it is needed for handling stack arguments
7354 * correctly (See test_0_box_brtrue_opt_regress_81102).
7356 link_bblock (cfg, bblock, tblock);
7357 if (sp != stack_start) {
7358 handle_stack_args (cfg, stack_start, sp - stack_start);
7360 CHECK_UNVERIFIABLE (cfg);
7362 MONO_ADD_INS (bblock, ins);
7363 start_new_bblock = 1;
7370 if (mono_class_is_nullable (klass)) {
7371 GENERIC_SHARING_FAILURE (CEE_BOX);
7376 EMIT_GET_RGCTX (rgctx, context_used);
7377 if (cfg->opt & MONO_OPT_SHARED)
7378 rgctx_info = MONO_RGCTX_INFO_KLASS;
7380 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7381 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7382 *sp++ = handle_box_from_inst (cfg, val, klass, data);
7385 *sp++ = handle_box (cfg, val, klass);
7393 MonoInst *rgctx = NULL;
7398 token = read32 (ip + 1);
7399 klass = mini_get_class (method, token, generic_context);
7400 CHECK_TYPELOAD (klass);
7402 if (cfg->generic_sharing_context)
7403 context_used = mono_class_check_context_used (klass);
7406 EMIT_GET_RGCTX (rgctx, context_used);
7408 if (mono_class_is_nullable (klass)) {
7411 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7412 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7416 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7426 MonoClassField *field;
7430 if (*ip == CEE_STFLD) {
7437 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7439 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7442 token = read32 (ip + 1);
7443 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7444 field = mono_method_get_wrapper_data (method, token);
7445 klass = field->parent;
7448 field = mono_field_from_token (image, token, &klass, generic_context);
7452 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7453 FIELD_ACCESS_FAILURE;
7454 mono_class_init (klass);
7456 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7457 if (*ip == CEE_STFLD) {
7458 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7460 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7461 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7462 MonoInst *iargs [5];
7465 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7466 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7467 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7471 if (cfg->opt & MONO_OPT_INLINE) {
7472 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7473 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7474 g_assert (costs > 0);
7477 cfg->real_offset += 5;
7480 inline_costs += costs;
7483 mono_emit_method_call (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper), iargs, NULL);
7488 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7490 store->flags |= ins_flag;
7497 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7498 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7499 MonoInst *iargs [4];
7502 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7503 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7504 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7505 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7506 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7507 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7509 g_assert (costs > 0);
7512 cfg->real_offset += 5;
7516 inline_costs += costs;
7519 ins = mono_emit_method_call (cfg, wrapper, mono_method_signature (wrapper), iargs, NULL);
7523 if (sp [0]->type == STACK_VTYPE) {
7526 /* Have to compute the address of the variable */
7528 var = cfg->vreg_to_inst [sp [0]->dreg];
7529 if (!var && sp [0]->opcode == OP_VMOVE)
7530 var = cfg->vreg_to_inst [sp [0]->sreg1];
7533 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7537 if (*ip == CEE_LDFLDA) {
7538 dreg = alloc_preg (cfg);
7540 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7541 ins->klass = mono_class_from_mono_type (field->type);
7542 ins->type = STACK_MP;
7547 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7548 load->flags |= ins_flag;
7559 MonoClassField *field;
7560 gpointer addr = NULL;
7561 gboolean is_special_static;
7564 token = read32 (ip + 1);
7566 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7567 field = mono_method_get_wrapper_data (method, token);
7568 klass = field->parent;
7571 field = mono_field_from_token (image, token, &klass, generic_context);
7574 mono_class_init (klass);
7575 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7576 FIELD_ACCESS_FAILURE;
7579 * We can only support shared generic static
7580 * field access on architectures where the
7581 * trampoline code has been extended to handle
7582 * the generic class init.
7584 #ifndef MONO_ARCH_VTABLE_REG
7585 GENERIC_SHARING_FAILURE (*ip);
7588 if (cfg->generic_sharing_context)
7589 context_used = mono_class_check_context_used (klass);
7591 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7593 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7594 * to be called here.
7596 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7597 mono_class_vtable (cfg->domain, klass);
7598 CHECK_TYPELOAD (klass);
7600 mono_domain_lock (cfg->domain);
7601 if (cfg->domain->special_static_fields)
7602 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7603 mono_domain_unlock (cfg->domain);
7605 is_special_static = mono_class_field_is_special_static (field);
7607 /* Generate IR to compute the field address */
7609 if ((cfg->opt & MONO_OPT_SHARED) ||
7610 (cfg->compile_aot && is_special_static) ||
7611 (context_used && is_special_static)) {
7612 MonoInst *iargs [2];
7614 g_assert (field->parent);
7615 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7619 EMIT_GET_RGCTX (rgctx, context_used);
7620 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7622 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7624 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7625 } else if (context_used) {
7626 MonoInst *rgctx, *static_data;
7629 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7630 method->klass->name_space, method->klass->name, method->name,
7631 depth, field->offset);
7634 if (mono_class_needs_cctor_run (klass, method)) {
7636 MonoInst *vtable, *rgctx;
7638 EMIT_GET_RGCTX (rgctx, context_used);
7639 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7641 // FIXME: This doesn't work since it tries to pass the argument
7642 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7644 * The vtable pointer is always passed in a register regardless of
7645 * the calling convention, so assign it manually, and make a call
7646 * using a signature without parameters.
7648 call = (MonoCallInst*)mono_emit_native_call (cfg, mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT), helper_sig_generic_class_init_trampoline, &vtable);
7649 #ifdef MONO_ARCH_VTABLE_REG
7650 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7657 * The pointer we're computing here is
7659 * super_info.static_data + field->offset
7661 EMIT_GET_RGCTX (rgctx, context_used);
7662 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7664 if (field->offset == 0) {
7667 int addr_reg = mono_alloc_preg (cfg);
7668 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7670 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7671 MonoInst *iargs [2];
7673 g_assert (field->parent);
7674 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7675 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7676 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7678 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7680 CHECK_TYPELOAD (klass);
7682 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7683 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7684 mono_emit_native_call (cfg, tramp,
7685 helper_sig_class_init_trampoline,
7687 if (cfg->verbose_level > 2)
7688 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7689 class_inits = g_slist_prepend (class_inits, vtable);
7691 if (cfg->run_cctors) {
7693 /* This makes it so that inlining cannot trigger */
7694 /* .cctors: too many apps depend on them */
7695 /* running with a specific order... */
7696 if (! vtable->initialized)
7698 ex = mono_runtime_class_init_full (vtable, FALSE);
7700 set_exception_object (cfg, ex);
7701 goto exception_exit;
7705 addr = (char*)vtable->data + field->offset;
7707 if (cfg->compile_aot)
7708 EMIT_NEW_SFLDACONST (cfg, ins, field);
7710 EMIT_NEW_PCONST (cfg, ins, addr);
7713 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7714 * This could be later optimized to do just a couple of
7715 * memory dereferences with constant offsets.
7717 MonoInst *iargs [1];
7718 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7719 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7723 /* Generate IR to do the actual load/store operation */
7725 if (*ip == CEE_LDSFLDA) {
7726 ins->klass = mono_class_from_mono_type (field->type);
7728 } else if (*ip == CEE_STSFLD) {
7733 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7734 store->flags |= ins_flag;
7737 gboolean is_const = FALSE;
7738 MonoVTable *vtable = NULL;
7740 if (!context_used) {
7741 vtable = mono_class_vtable (cfg->domain, klass);
7742 CHECK_TYPELOAD (klass);
7744 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7745 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7746 gpointer addr = (char*)vtable->data + field->offset;
7747 int ro_type = field->type->type;
7748 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7749 ro_type = field->type->data.klass->enum_basetype->type;
7751 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7754 case MONO_TYPE_BOOLEAN:
7756 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7760 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7763 case MONO_TYPE_CHAR:
7765 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7769 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7774 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7778 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7783 case MONO_TYPE_STRING:
7784 case MONO_TYPE_OBJECT:
7785 case MONO_TYPE_CLASS:
7786 case MONO_TYPE_SZARRAY:
7788 case MONO_TYPE_FNPTR:
7789 case MONO_TYPE_ARRAY:
7790 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7791 type_to_eval_stack_type ((cfg), field->type, *sp);
7796 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7801 case MONO_TYPE_VALUETYPE:
7811 CHECK_STACK_OVF (1);
7813 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7814 load->flags |= ins_flag;
7818 /* FIXME: don't see why this does not work */
7819 //cfg->disable_aot = TRUE;
7829 token = read32 (ip + 1);
7830 klass = mini_get_class (method, token, generic_context);
7831 CHECK_TYPELOAD (klass);
7832 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7844 const char *data_ptr;
7846 gboolean shared_access = FALSE;
7852 token = read32 (ip + 1);
7854 klass = mini_get_class (method, token, generic_context);
7855 CHECK_TYPELOAD (klass);
7857 if (cfg->generic_sharing_context) {
7858 int context_used = mono_class_check_context_used (klass);
7860 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
7861 GENERIC_SHARING_FAILURE (CEE_NEWARR);
7864 shared_access = TRUE;
7867 if (shared_access) {
7871 /* FIXME: Decompose later to help abcrem */
7874 EMIT_NEW_DOMAINCONST (cfg, args [0]);
7877 EMIT_GET_RGCTX (rgctx, context_used);
7878 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7883 ins = mono_emit_jit_icall (cfg, mono_array_new, args);
7885 if (cfg->opt & MONO_OPT_SHARED) {
7886 /* Decompose now to avoid problems with references to the domainvar */
7887 MonoInst *iargs [3];
7889 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7890 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7893 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7895 /* Decompose later since it is needed by abcrem */
7896 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7897 ins->dreg = alloc_preg (cfg);
7898 ins->sreg1 = sp [0]->dreg;
7899 ins->inst_newa_class = klass;
7900 ins->type = STACK_OBJ;
7902 MONO_ADD_INS (cfg->cbb, ins);
7903 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7904 cfg->cbb->has_array_access = TRUE;
7906 /* Needed so mono_emit_load_get_addr () gets called */
7907 mono_get_got_var (cfg);
7917 * we inline/optimize the initialization sequence if possible.
7918 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7919 * for small sizes open code the memcpy
7920 * ensure the rva field is big enough
7922 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7923 MonoMethod *memcpy_method = get_memcpy_method ();
7924 MonoInst *iargs [3];
7925 int add_reg = alloc_preg (cfg);
7927 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7928 if (cfg->compile_aot) {
7929 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7931 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7933 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7934 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
7943 if (sp [0]->type != STACK_OBJ)
7946 dreg = alloc_preg (cfg);
7947 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7948 ins->dreg = alloc_preg (cfg);
7949 ins->sreg1 = sp [0]->dreg;
7950 ins->type = STACK_I4;
7951 MONO_ADD_INS (cfg->cbb, ins);
7952 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7953 cfg->cbb->has_array_access = TRUE;
7961 if (sp [0]->type != STACK_OBJ)
7964 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7966 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7967 CHECK_TYPELOAD (klass);
7968 /* we need to make sure that this array is exactly the type it needs
7969 * to be for correctness. the wrappers are lax with their usage
7970 * so we need to ignore them here
7972 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7973 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7976 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7980 case CEE_LDELEM_ANY:
7991 case CEE_LDELEM_REF: {
7997 if (*ip == CEE_LDELEM_ANY) {
7999 token = read32 (ip + 1);
8000 klass = mini_get_class (method, token, generic_context);
8001 CHECK_TYPELOAD (klass);
8002 mono_class_init (klass);
8005 klass = array_access_to_klass (*ip);
8007 if (sp [0]->type != STACK_OBJ)
8010 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8012 if (sp [1]->opcode == OP_ICONST) {
8013 int array_reg = sp [0]->dreg;
8014 int index_reg = sp [1]->dreg;
8015 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8017 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8018 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8020 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8021 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8024 if (*ip == CEE_LDELEM_ANY)
8037 case CEE_STELEM_REF:
8038 case CEE_STELEM_ANY: {
8044 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8046 if (*ip == CEE_STELEM_ANY) {
8048 token = read32 (ip + 1);
8049 klass = mini_get_class (method, token, generic_context);
8050 CHECK_TYPELOAD (klass);
8051 mono_class_init (klass);
8054 klass = array_access_to_klass (*ip);
8056 if (sp [0]->type != STACK_OBJ)
8059 /* storing a NULL doesn't need any of the complex checks in stelemref */
8060 if (generic_class_is_reference_type (cfg, klass) &&
8061 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8062 MonoMethod* helper = mono_marshal_get_stelemref ();
8063 MonoInst *iargs [3];
8065 if (sp [0]->type != STACK_OBJ)
8067 if (sp [2]->type != STACK_OBJ)
8074 mono_emit_method_call (cfg, helper, mono_method_signature (helper), iargs, NULL);
8076 if (sp [1]->opcode == OP_ICONST) {
8077 int array_reg = sp [0]->dreg;
8078 int index_reg = sp [1]->dreg;
8079 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8081 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8082 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8084 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8085 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8089 if (*ip == CEE_STELEM_ANY)
8096 case CEE_CKFINITE: {
8100 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8101 ins->sreg1 = sp [0]->dreg;
8102 ins->dreg = alloc_freg (cfg);
8103 ins->type = STACK_R8;
8104 MONO_ADD_INS (bblock, ins);
8110 case CEE_REFANYVAL: {
8111 MonoInst *src_var, *src;
8112 int context_used = 0;
8114 int klass_reg = alloc_preg (cfg);
8115 int dreg = alloc_preg (cfg);
8118 MONO_INST_NEW (cfg, ins, *ip);
8121 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8122 CHECK_TYPELOAD (klass);
8123 mono_class_init (klass);
8125 if (cfg->generic_sharing_context) {
8126 context_used = mono_class_check_context_used (klass);
8127 if (context_used && cfg->compile_aot)
8128 GENERIC_SHARING_FAILURE (*ip);
8133 GENERIC_SHARING_FAILURE (*ip);
8136 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8138 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8139 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8141 mini_emit_class_check (cfg, klass_reg, klass);
8142 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8144 ins->type = STACK_MP;
8149 case CEE_MKREFANY: {
8150 MonoInst *loc, *addr;
8151 int context_used = 0;
8154 MONO_INST_NEW (cfg, ins, *ip);
8157 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8158 CHECK_TYPELOAD (klass);
8159 mono_class_init (klass);
8161 if (cfg->generic_sharing_context) {
8162 context_used = mono_class_check_context_used (klass);
8163 if (context_used && cfg->compile_aot)
8164 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8167 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8168 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8171 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8172 } else if (cfg->compile_aot) {
8173 int const_reg = alloc_preg (cfg);
8174 int type_reg = alloc_preg (cfg);
8176 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8177 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8178 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8181 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8184 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8186 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8187 ins->type = STACK_VTYPE;
8188 ins->klass = mono_defaults.typed_reference_class;
8195 MonoClass *handle_class;
8196 int context_used = 0;
8198 CHECK_STACK_OVF (1);
8201 n = read32 (ip + 1);
8203 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8204 handle = mono_method_get_wrapper_data (method, n);
8205 handle_class = mono_method_get_wrapper_data (method, n + 1);
8206 if (handle_class == mono_defaults.typehandle_class)
8207 handle = &((MonoClass*)handle)->byval_arg;
8210 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8214 mono_class_init (handle_class);
8215 if (cfg->generic_sharing_context) {
8216 if (handle_class == mono_defaults.typehandle_class) {
8217 /* If we get a MONO_TYPE_CLASS
8218 then we need to provide the
8220 instantiation of it. */
8221 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8224 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8225 } else if (handle_class == mono_defaults.fieldhandle_class)
8226 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8227 else if (handle_class == mono_defaults.methodhandle_class)
8228 context_used = mono_method_check_context_used (handle);
8230 g_assert_not_reached ();
8233 if (cfg->opt & MONO_OPT_SHARED) {
8234 MonoInst *addr, *vtvar, *iargs [3];
8235 int method_context_used;
8237 if (cfg->generic_sharing_context)
8238 method_context_used = mono_method_check_context_used (method);
8240 method_context_used = 0;
8242 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8244 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8245 EMIT_NEW_ICONST (cfg, iargs [1], n);
8246 if (method_context_used) {
8249 EMIT_GET_RGCTX (rgctx, method_context_used);
8250 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8251 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8253 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8254 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8256 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8258 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8260 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8262 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8263 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8264 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8265 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8266 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8267 MonoClass *tclass = mono_class_from_mono_type (handle);
8269 mono_class_init (tclass);
8273 g_assert (!cfg->compile_aot);
8274 EMIT_GET_RGCTX (rgctx, context_used);
8275 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8276 } else if (cfg->compile_aot) {
8278 * FIXME: We would have to include the context into the
8279 * aot constant too (tests/generic-array-type.2.exe).
8281 if (generic_context)
8282 cfg->disable_aot = TRUE;
8283 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8285 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8287 ins->type = STACK_OBJ;
8288 ins->klass = cmethod->klass;
8291 MonoInst *addr, *vtvar;
8293 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8298 g_assert (!cfg->compile_aot);
8300 EMIT_GET_RGCTX (rgctx, context_used);
8301 if (handle_class == mono_defaults.typehandle_class) {
8302 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8303 mono_class_from_mono_type (handle),
8304 MONO_RGCTX_INFO_TYPE);
8305 } else if (handle_class == mono_defaults.methodhandle_class) {
8306 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8307 handle, MONO_RGCTX_INFO_METHOD);
8308 } else if (handle_class == mono_defaults.fieldhandle_class) {
8309 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8310 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8312 g_assert_not_reached ();
8314 } else if (cfg->compile_aot) {
8315 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8317 EMIT_NEW_PCONST (cfg, ins, handle);
8319 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8320 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8321 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8331 MONO_INST_NEW (cfg, ins, OP_THROW);
8333 ins->sreg1 = sp [0]->dreg;
8335 bblock->out_of_line = TRUE;
8336 MONO_ADD_INS (bblock, ins);
8337 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8338 MONO_ADD_INS (bblock, ins);
8341 link_bblock (cfg, bblock, end_bblock);
8342 start_new_bblock = 1;
8344 case CEE_ENDFINALLY:
8345 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8346 MONO_ADD_INS (bblock, ins);
8348 start_new_bblock = 1;
8351 * Control will leave the method so empty the stack, otherwise
8352 * the next basic block will start with a nonempty stack.
8354 while (sp != stack_start) {
8362 if (*ip == CEE_LEAVE) {
8364 target = ip + 5 + (gint32)read32(ip + 1);
8367 target = ip + 2 + (signed char)(ip [1]);
8370 /* empty the stack */
8371 while (sp != stack_start) {
8376 * If this leave statement is in a catch block, check for a
8377 * pending exception, and rethrow it if necessary.
8379 for (i = 0; i < header->num_clauses; ++i) {
8380 MonoExceptionClause *clause = &header->clauses [i];
8383 * Use <= in the final comparison to handle clauses with multiple
8384 * leave statements, like in bug #78024.
8385 * The ordering of the exception clauses guarantees that we find the
8388 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8390 MonoBasicBlock *dont_throw;
8395 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8398 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8400 NEW_BBLOCK (cfg, dont_throw);
8403 * Currently, we always rethrow the abort exception, despite the
8404 * fact that this is not correct. See thread6.cs for an example.
8405 * But propagating the abort exception is more important than
8406 * getting the semantics right.
8408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8409 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8410 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8412 MONO_START_BB (cfg, dont_throw);
8417 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8419 for (tmp = handlers; tmp; tmp = tmp->next) {
8421 link_bblock (cfg, bblock, tblock);
8422 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8423 ins->inst_target_bb = tblock;
8424 MONO_ADD_INS (bblock, ins);
8426 g_list_free (handlers);
8429 MONO_INST_NEW (cfg, ins, OP_BR);
8430 MONO_ADD_INS (bblock, ins);
8431 GET_BBLOCK (cfg, tblock, target);
8432 link_bblock (cfg, bblock, tblock);
8433 CHECK_BBLOCK (target, ip, tblock);
8434 ins->inst_target_bb = tblock;
8435 start_new_bblock = 1;
8437 if (*ip == CEE_LEAVE)
8446 * Mono specific opcodes
8448 case MONO_CUSTOM_PREFIX: {
8450 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8454 case CEE_MONO_ICALL: {
8456 MonoJitICallInfo *info;
8458 token = read32 (ip + 2);
8459 func = mono_method_get_wrapper_data (method, token);
8460 info = mono_find_jit_icall_by_addr (func);
8463 CHECK_STACK (info->sig->param_count);
8464 sp -= info->sig->param_count;
8466 ins = mono_emit_jit_icall (cfg, info->func, sp);
8467 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8471 inline_costs += 10 * num_calls++;
8475 case CEE_MONO_LDPTR: {
8478 CHECK_STACK_OVF (1);
8480 token = read32 (ip + 2);
8482 ptr = mono_method_get_wrapper_data (method, token);
8483 if (cfg->compile_aot && (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE)) {
8484 MonoMethod *wrapped = mono_marshal_method_from_wrapper (cfg->method);
8486 if (wrapped && ptr != NULL && mono_lookup_internal_call (wrapped) == ptr) {
8487 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, wrapped);
8493 if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8494 MonoJitICallInfo *callinfo;
8495 const char *icall_name;
8497 icall_name = method->name + strlen ("__icall_wrapper_");
8498 g_assert (icall_name);
8499 callinfo = mono_find_jit_icall_by_name (icall_name);
8500 g_assert (callinfo);
8502 if (ptr == callinfo->func) {
8503 /* Will be transformed into an AOTCONST later */
8504 EMIT_NEW_PCONST (cfg, ins, ptr);
8511 /* FIXME: Generalize this */
8512 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8513 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8518 EMIT_NEW_PCONST (cfg, ins, ptr);
8521 inline_costs += 10 * num_calls++;
8522 /* Can't embed random pointers into AOT code */
8523 cfg->disable_aot = 1;
8526 case CEE_MONO_VTADDR: {
8527 MonoInst *src_var, *src;
8533 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8534 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8539 case CEE_MONO_NEWOBJ: {
8540 MonoInst *iargs [2];
8542 CHECK_STACK_OVF (1);
8544 token = read32 (ip + 2);
8545 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8546 mono_class_init (klass);
8547 NEW_DOMAINCONST (cfg, iargs [0]);
8548 MONO_ADD_INS (cfg->cbb, iargs [0]);
8549 NEW_CLASSCONST (cfg, iargs [1], klass);
8550 MONO_ADD_INS (cfg->cbb, iargs [1]);
8551 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8553 inline_costs += 10 * num_calls++;
8556 case CEE_MONO_OBJADDR:
8559 MONO_INST_NEW (cfg, ins, OP_MOVE);
8560 ins->dreg = alloc_preg (cfg);
8561 ins->sreg1 = sp [0]->dreg;
8562 ins->type = STACK_MP;
8563 MONO_ADD_INS (cfg->cbb, ins);
8567 case CEE_MONO_LDNATIVEOBJ:
8569 * Similar to LDOBJ, but instead load the unmanaged
8570 * representation of the vtype to the stack.
8575 token = read32 (ip + 2);
8576 klass = mono_method_get_wrapper_data (method, token);
8577 g_assert (klass->valuetype);
8578 mono_class_init (klass);
8581 MonoInst *src, *dest, *temp;
8584 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8585 temp->backend.is_pinvoke = 1;
8586 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8587 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8589 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8590 dest->type = STACK_VTYPE;
8591 dest->klass = klass;
8597 case CEE_MONO_RETOBJ: {
8599 * Same as RET, but return the native representation of a vtype
8602 g_assert (cfg->ret);
8603 g_assert (mono_method_signature (method)->pinvoke);
8608 token = read32 (ip + 2);
8609 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8611 if (!cfg->vret_addr) {
8612 g_assert (cfg->ret_var_is_local);
8614 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8616 EMIT_NEW_RETLOADA (cfg, ins);
8618 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8620 if (sp != stack_start)
8623 MONO_INST_NEW (cfg, ins, OP_BR);
8624 ins->inst_target_bb = end_bblock;
8625 MONO_ADD_INS (bblock, ins);
8626 link_bblock (cfg, bblock, end_bblock);
8627 start_new_bblock = 1;
8631 case CEE_MONO_CISINST:
8632 case CEE_MONO_CCASTCLASS: {
8637 token = read32 (ip + 2);
8638 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8639 if (ip [1] == CEE_MONO_CISINST)
8640 ins = handle_cisinst (cfg, klass, sp [0]);
8642 ins = handle_ccastclass (cfg, klass, sp [0]);
8648 case CEE_MONO_SAVE_LMF:
8649 case CEE_MONO_RESTORE_LMF:
8650 #ifdef MONO_ARCH_HAVE_LMF_OPS
8651 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8652 MONO_ADD_INS (bblock, ins);
8653 cfg->need_lmf_area = TRUE;
8657 case CEE_MONO_CLASSCONST:
8658 CHECK_STACK_OVF (1);
8660 token = read32 (ip + 2);
8661 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8664 inline_costs += 10 * num_calls++;
8666 case CEE_MONO_NOT_TAKEN:
8667 bblock->out_of_line = TRUE;
8671 CHECK_STACK_OVF (1);
8673 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8674 ins->dreg = alloc_preg (cfg);
8675 ins->inst_offset = (gint32)read32 (ip + 2);
8676 ins->type = STACK_PTR;
8677 MONO_ADD_INS (bblock, ins);
8682 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8692 /* somewhat similar to LDTOKEN */
8693 MonoInst *addr, *vtvar;
8694 CHECK_STACK_OVF (1);
8695 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8697 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8698 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8700 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8701 ins->type = STACK_VTYPE;
8702 ins->klass = mono_defaults.argumenthandle_class;
8715 * The following transforms:
8716 * CEE_CEQ into OP_CEQ
8717 * CEE_CGT into OP_CGT
8718 * CEE_CGT_UN into OP_CGT_UN
8719 * CEE_CLT into OP_CLT
8720 * CEE_CLT_UN into OP_CLT_UN
8722 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8724 MONO_INST_NEW (cfg, ins, cmp->opcode);
8726 cmp->sreg1 = sp [0]->dreg;
8727 cmp->sreg2 = sp [1]->dreg;
8728 type_from_op (cmp, sp [0], sp [1]);
8730 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8731 cmp->opcode = OP_LCOMPARE;
8732 else if (sp [0]->type == STACK_R8)
8733 cmp->opcode = OP_FCOMPARE;
8735 cmp->opcode = OP_ICOMPARE;
8736 MONO_ADD_INS (bblock, cmp);
8737 ins->type = STACK_I4;
8738 ins->dreg = alloc_dreg (cfg, ins->type);
8739 type_from_op (ins, sp [0], sp [1]);
8741 if (cmp->opcode == OP_FCOMPARE) {
8743 * The backends expect the fceq opcodes to do the
8746 cmp->opcode = OP_NOP;
8747 ins->sreg1 = cmp->sreg1;
8748 ins->sreg2 = cmp->sreg2;
8750 MONO_ADD_INS (bblock, ins);
8757 MonoMethod *cil_method, *ctor_method;
8760 CHECK_STACK_OVF (1);
8762 n = read32 (ip + 2);
8763 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8766 mono_class_init (cmethod->klass);
8768 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8769 GENERIC_SHARING_FAILURE (CEE_LDFTN);
8771 is_shared = (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8772 (cmethod->klass->generic_class || cmethod->klass->generic_container) &&
8773 mono_class_generic_sharing_enabled (cmethod->klass);
8775 cil_method = cmethod;
8776 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8777 METHOD_ACCESS_FAILURE;
8779 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8780 if (check_linkdemand (cfg, method, cmethod))
8782 CHECK_CFG_EXCEPTION;
8783 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8784 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8788 * Optimize the common case of ldftn+delegate creation
8790 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8791 /* FIXME: SGEN support */
8792 if (!is_shared && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8793 MonoInst *target_ins;
8796 if (cfg->verbose_level > 3)
8797 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8798 target_ins = sp [-1];
8800 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8808 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8810 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8811 if (method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED)
8812 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8814 ins = mono_emit_jit_icall (cfg, mono_ldftn_nosync, &argconst);
8818 inline_costs += 10 * num_calls++;
8821 case CEE_LDVIRTFTN: {
8826 n = read32 (ip + 2);
8827 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8830 mono_class_init (cmethod->klass);
8832 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8833 GENERIC_SHARING_FAILURE (CEE_LDVIRTFTN);
8835 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8836 if (check_linkdemand (cfg, method, cmethod))
8838 CHECK_CFG_EXCEPTION;
8839 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8840 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8845 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8846 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8849 inline_costs += 10 * num_calls++;
8853 CHECK_STACK_OVF (1);
8855 n = read16 (ip + 2);
8857 EMIT_NEW_ARGLOAD (cfg, ins, n);
8862 CHECK_STACK_OVF (1);
8864 n = read16 (ip + 2);
8866 NEW_ARGLOADA (cfg, ins, n);
8867 MONO_ADD_INS (cfg->cbb, ins);
8875 n = read16 (ip + 2);
8877 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8879 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8883 CHECK_STACK_OVF (1);
8885 n = read16 (ip + 2);
8887 EMIT_NEW_LOCLOAD (cfg, ins, n);
8892 CHECK_STACK_OVF (1);
8894 n = read16 (ip + 2);
8896 EMIT_NEW_LOCLOADA (cfg, ins, n);
8904 n = read16 (ip + 2);
8906 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8908 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8915 if (sp != stack_start)
8917 if (cfg->method != method)
8919 * Inlining this into a loop in a parent could lead to
8920 * stack overflows which is different behavior than the
8921 * non-inlined case, thus disable inlining in this case.
8923 goto inline_failure;
8925 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8926 ins->dreg = alloc_preg (cfg);
8927 ins->sreg1 = sp [0]->dreg;
8928 ins->type = STACK_PTR;
8929 MONO_ADD_INS (cfg->cbb, ins);
8931 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8932 if (header->init_locals)
8933 ins->flags |= MONO_INST_INIT;
8938 case CEE_ENDFILTER: {
8939 MonoExceptionClause *clause, *nearest;
8940 int cc, nearest_num;
8944 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8946 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8947 ins->sreg1 = (*sp)->dreg;
8948 MONO_ADD_INS (bblock, ins);
8949 start_new_bblock = 1;
8954 for (cc = 0; cc < header->num_clauses; ++cc) {
8955 clause = &header->clauses [cc];
8956 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8957 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8958 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8964 if ((ip - header->code) != nearest->handler_offset)
8969 case CEE_UNALIGNED_:
8970 ins_flag |= MONO_INST_UNALIGNED;
8971 /* FIXME: record alignment? we can assume 1 for now */
8976 ins_flag |= MONO_INST_VOLATILE;
8980 ins_flag |= MONO_INST_TAILCALL;
8981 cfg->flags |= MONO_CFG_HAS_TAIL;
8982 /* Can't inline tail calls at this time */
8983 inline_costs += 100000;
8990 token = read32 (ip + 2);
8991 klass = mini_get_class (method, token, generic_context);
8992 CHECK_TYPELOAD (klass);
8993 if (generic_class_is_reference_type (cfg, klass)) {
8994 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
8996 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
8997 mini_emit_initobj (cfg, *sp, NULL, klass);
9002 case CEE_CONSTRAINED_:
9004 token = read32 (ip + 2);
9005 constrained_call = mono_class_get_full (image, token, generic_context);
9006 CHECK_TYPELOAD (constrained_call);
9011 MonoInst *iargs [3];
9015 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9016 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9017 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9018 /* emit_memset only works when val == 0 */
9019 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9024 if (ip [1] == CEE_CPBLK) {
9025 MonoMethod *memcpy_method = get_memcpy_method ();
9026 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
9028 MonoMethod *memset_method = get_memset_method ();
9029 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
9039 ins_flag |= MONO_INST_NOTYPECHECK;
9041 ins_flag |= MONO_INST_NORANGECHECK;
9042 /* we ignore the no-nullcheck for now since we
9043 * really do it explicitly only when doing callvirt->call
9049 int handler_offset = -1;
9051 for (i = 0; i < header->num_clauses; ++i) {
9052 MonoExceptionClause *clause = &header->clauses [i];
9053 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY))
9054 handler_offset = clause->handler_offset;
9057 bblock->flags |= BB_EXCEPTION_UNSAFE;
9059 g_assert (handler_offset != -1);
9061 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9062 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9063 ins->sreg1 = load->dreg;
9064 MONO_ADD_INS (bblock, ins);
9066 link_bblock (cfg, bblock, end_bblock);
9067 start_new_bblock = 1;
9075 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9077 CHECK_STACK_OVF (1);
9079 token = read32 (ip + 2);
9080 /* FIXXME: handle generics. */
9081 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9082 MonoType *type = mono_type_create_from_typespec (image, token);
9083 token = mono_type_size (type, &ialign);
9085 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9086 CHECK_TYPELOAD (klass);
9087 mono_class_init (klass);
9088 token = mono_class_value_size (klass, &align);
9090 EMIT_NEW_ICONST (cfg, ins, token);
9095 case CEE_REFANYTYPE: {
9096 MonoInst *src_var, *src;
9102 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9104 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9105 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9106 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9116 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9121 g_error ("opcode 0x%02x not handled", *ip);
9124 if (start_new_bblock != 1)
9127 bblock->cil_length = ip - bblock->cil_code;
9128 bblock->next_bb = end_bblock;
9130 if (cfg->method == method && cfg->domainvar) {
9132 MonoInst *get_domain;
9134 cfg->cbb = init_localsbb;
9136 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9137 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9140 get_domain->dreg = alloc_preg (cfg);
9141 MONO_ADD_INS (cfg->cbb, get_domain);
9143 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9144 MONO_ADD_INS (cfg->cbb, store);
9147 if (cfg->method == method && cfg->got_var)
9148 mono_emit_load_got_addr (cfg);
9150 if (header->init_locals) {
9153 cfg->cbb = init_localsbb;
9154 cfg->ip = header->code;
9155 for (i = 0; i < header->num_locals; ++i) {
9156 MonoType *ptype = header->locals [i];
9157 int t = ptype->type;
9158 dreg = cfg->locals [i]->dreg;
9160 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9161 t = ptype->data.klass->enum_basetype->type;
9163 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9164 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9165 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9166 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9167 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9168 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9169 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9170 ins->type = STACK_R8;
9171 ins->inst_p0 = (void*)&r8_0;
9172 ins->dreg = alloc_dreg (cfg, STACK_R8);
9173 MONO_ADD_INS (init_localsbb, ins);
9174 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9175 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9176 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9177 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9179 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9186 /* resolve backward branches in the middle of an existing basic block */
9187 for (tmp = bb_recheck; tmp; tmp = tmp->next) {
9189 /*printf ("need recheck in %s at IL_%04x\n", method->name, bblock->cil_code - header->code);*/
9190 tblock = find_previous (cfg->cil_offset_to_bb, header->code_size, start_bblock, bblock->cil_code);
9191 if (tblock != start_bblock) {
9193 split_bblock (cfg, tblock, bblock);
9194 l = bblock->cil_code - header->code;
9195 bblock->cil_length = tblock->cil_length - l;
9196 tblock->cil_length = l;
9198 printf ("recheck failed.\n");
9202 if (cfg->method == method) {
9204 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9205 bb->region = mono_find_block_region (cfg, bb->real_offset);
9207 mono_create_spvar_for_region (cfg, bb->region);
9208 if (cfg->verbose_level > 2)
9209 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9213 g_slist_free (class_inits);
9214 dont_inline = g_list_remove (dont_inline, method);
9216 if (inline_costs < 0) {
9219 /* Method is too large */
9220 mname = mono_method_full_name (method, TRUE);
9221 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9222 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9227 if ((cfg->verbose_level > 1) && (cfg->method == method))
9228 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9230 return inline_costs;
9233 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9234 g_slist_free (class_inits);
9235 dont_inline = g_list_remove (dont_inline, method);
9239 g_slist_free (class_inits);
9240 dont_inline = g_list_remove (dont_inline, method);
9244 g_slist_free (class_inits);
9245 dont_inline = g_list_remove (dont_inline, method);
9246 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9250 g_slist_free (class_inits);
9251 dont_inline = g_list_remove (dont_inline, method);
9252 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode (OP_STORE*_MEMBASE_REG) to the
 * corresponding store-immediate form (OP_STORE*_MEMBASE_IMM).  Presumably
 * used when the stored value turns out to be a constant — confirm at call
 * sites.  Opcodes without an immediate form trip the assertion below.
 * NOTE(review): the switch header/default lines are elided in this excerpt.
 */
9257 store_membase_reg_to_store_membase_imm (int opcode)
9260 case OP_STORE_MEMBASE_REG:
9261 return OP_STORE_MEMBASE_IMM;
9262 case OP_STOREI1_MEMBASE_REG:
9263 return OP_STOREI1_MEMBASE_IMM;
9264 case OP_STOREI2_MEMBASE_REG:
9265 return OP_STOREI2_MEMBASE_IMM;
9266 case OP_STOREI4_MEMBASE_REG:
9267 return OP_STOREI4_MEMBASE_IMM;
9268 case OP_STOREI8_MEMBASE_REG:
9269 return OP_STOREI8_MEMBASE_IMM;
/* no immediate variant exists for the remaining store opcodes */
9271 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode to the variant taking an immediate second operand
 * (e.g. OP_ICOMPARE -> OP_ICOMPARE_IMM, OP_STOREI4_MEMBASE_REG ->
 * OP_STOREI4_MEMBASE_IMM).  NOTE(review): several case labels are elided
 * in this excerpt; the visible returns are keyed by the missing labels.
 * x86/amd64-only mappings are compiled in conditionally below.
 */
9278 mono_op_to_op_imm (int opcode)
9288 return OP_IDIV_UN_IMM;
9292 return OP_IREM_UN_IMM;
9306 return OP_ISHR_UN_IMM;
9323 return OP_LSHR_UN_IMM;
9326 return OP_COMPARE_IMM;
9328 return OP_ICOMPARE_IMM;
9330 return OP_LCOMPARE_IMM;
9332 case OP_STORE_MEMBASE_REG:
9333 return OP_STORE_MEMBASE_IMM;
9334 case OP_STOREI1_MEMBASE_REG:
9335 return OP_STOREI1_MEMBASE_IMM;
9336 case OP_STOREI2_MEMBASE_REG:
9337 return OP_STOREI2_MEMBASE_IMM;
9338 case OP_STOREI4_MEMBASE_REG:
9339 return OP_STOREI4_MEMBASE_IMM;
/* immediate push/compare forms exist only on x86/amd64 */
9341 #if defined(__i386__) || defined (__x86_64__)
9343 return OP_X86_PUSH_IMM;
9344 case OP_X86_COMPARE_MEMBASE_REG:
9345 return OP_X86_COMPARE_MEMBASE_IMM;
9347 #if defined(__x86_64__)
9348 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9349 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9351 case OP_VOIDCALL_REG:
9360 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR opcode.  Unknown opcodes trip the assertion below.
 * NOTE(review): the case labels are elided in this excerpt; the two
 * OP_LOAD_MEMBASE returns are presumably the pointer-sized LDIND_I /
 * LDIND_REF cases — confirm against the full source.
 */
9367 ldind_to_load_membase (int opcode)
9371 return OP_LOADI1_MEMBASE;
9373 return OP_LOADU1_MEMBASE;
9375 return OP_LOADI2_MEMBASE;
9377 return OP_LOADU2_MEMBASE;
9379 return OP_LOADI4_MEMBASE;
9381 return OP_LOADU4_MEMBASE;
9383 return OP_LOAD_MEMBASE;
9385 return OP_LOAD_MEMBASE;
9387 return OP_LOADI8_MEMBASE;
9389 return OP_LOADR4_MEMBASE;
9391 return OP_LOADR8_MEMBASE;
9393 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR opcode.  Unknown opcodes trip the assertion
 * below.  NOTE(review): the case labels are elided in this excerpt.
 */
9400 stind_to_store_membase (int opcode)
9404 return OP_STOREI1_MEMBASE_REG;
9406 return OP_STOREI2_MEMBASE_REG;
9408 return OP_STOREI4_MEMBASE_REG;
9411 return OP_STORE_MEMBASE_REG;
9413 return OP_STOREI8_MEMBASE_REG;
9415 return OP_STORER4_MEMBASE_REG;
9417 return OP_STORER8_MEMBASE_REG;
9419 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM), which only the x86/amd64 backends provide; other targets
 * fall through (fall-through return elided in this excerpt).
 */
9426 mono_load_membase_to_load_mem (int opcode)
9428 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9429 #if defined(__i386__) || defined(__x86_64__)
9431 case OP_LOAD_MEMBASE:
9433 case OP_LOADU1_MEMBASE:
9434 return OP_LOADU1_MEM;
9435 case OP_LOADU2_MEMBASE:
9436 return OP_LOADU2_MEM;
9437 case OP_LOADI4_MEMBASE:
9438 return OP_LOADI4_MEM;
9439 case OP_LOADU4_MEMBASE:
9440 return OP_LOADU4_MEM;
/* 8-byte absolute loads only make sense on 64-bit targets */
9441 #if SIZEOF_VOID_P == 8
9442 case OP_LOADI8_MEMBASE:
9443 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU op's result is immediately stored back to memory through
 * 'store_opcode', fold both into a single x86/amd64 read-modify-write
 * memory opcode (e.g. add [base+disp], reg).  Only folds when the store is
 * of the natural/register width checked below; the non-foldable
 * fall-through return is elided in this excerpt.
 */
9452 op_to_op_dest_membase (int store_opcode, int opcode)
9454 #if defined(__i386__)
/* on x86 only pointer-sized / 32-bit stores can be folded */
9455 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9460 return OP_X86_ADD_MEMBASE_REG;
9462 return OP_X86_SUB_MEMBASE_REG;
9464 return OP_X86_AND_MEMBASE_REG;
9466 return OP_X86_OR_MEMBASE_REG;
9468 return OP_X86_XOR_MEMBASE_REG;
9471 return OP_X86_ADD_MEMBASE_IMM;
9474 return OP_X86_SUB_MEMBASE_IMM;
9477 return OP_X86_AND_MEMBASE_IMM;
9480 return OP_X86_OR_MEMBASE_IMM;
9483 return OP_X86_XOR_MEMBASE_IMM;
9489 #if defined(__x86_64__)
/* amd64 additionally accepts 64-bit stores */
9490 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9495 return OP_X86_ADD_MEMBASE_REG;
9497 return OP_X86_SUB_MEMBASE_REG;
9499 return OP_X86_AND_MEMBASE_REG;
9501 return OP_X86_OR_MEMBASE_REG;
9503 return OP_X86_XOR_MEMBASE_REG;
9505 return OP_X86_ADD_MEMBASE_IMM;
9507 return OP_X86_SUB_MEMBASE_IMM;
9509 return OP_X86_AND_MEMBASE_IMM;
9511 return OP_X86_OR_MEMBASE_IMM;
9513 return OP_X86_XOR_MEMBASE_IMM;
9515 return OP_AMD64_ADD_MEMBASE_REG;
9517 return OP_AMD64_SUB_MEMBASE_REG;
9519 return OP_AMD64_AND_MEMBASE_REG;
9521 return OP_AMD64_OR_MEMBASE_REG;
9523 return OP_AMD64_XOR_MEMBASE_REG;
9526 return OP_AMD64_ADD_MEMBASE_IMM;
9529 return OP_AMD64_SUB_MEMBASE_IMM;
9532 return OP_AMD64_AND_MEMBASE_IMM;
9535 return OP_AMD64_OR_MEMBASE_IMM;
9538 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a condition-set opcode whose byte-sized result is stored to memory
 * into an x86 SETcc-directly-to-memory opcode.  NOTE(review): the case
 * labels selecting the eq/ne branches are elided in this excerpt —
 * presumably OP_CEQ/OP_ICEQ-style opcodes; confirm against the full source.
 */
9548 op_to_op_store_membase (int store_opcode, int opcode)
9550 #if defined(__i386__) || defined(__x86_64__)
/* SETcc writes a single byte, so only a byte store can be folded */
9553 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9554 return OP_X86_SETEQ_MEMBASE;
9556 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9557 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the FIRST source register of 'opcode' into a
 * memory-operand form of the instruction (x86/amd64 only), avoiding the
 * intermediate register.  Width of the load is checked so narrower loads
 * are not folded (except the special byte-compare case below).
 */
9565 op_to_op_src1_membase (int load_opcode, int opcode)
9568 /* FIXME: This has sign extension issues */
/* compare-immediate against an unsigned byte load can use cmp byte [mem], imm */
9570 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9571 return OP_X86_COMPARE_MEMBASE8_IMM;
9574 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9579 return OP_X86_PUSH_MEMBASE;
9580 case OP_COMPARE_IMM:
9581 case OP_ICOMPARE_IMM:
9582 return OP_X86_COMPARE_MEMBASE_IMM;
9585 return OP_X86_COMPARE_MEMBASE_REG;
9590 /* FIXME: This has sign extension issues */
9592 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9593 return OP_X86_COMPARE_MEMBASE8_IMM;
9598 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9599 return OP_X86_PUSH_MEMBASE;
/* disabled: folded compare-immediate on amd64 is limited to 32-bit immediates */
9601 /* FIXME: This only works for 32 bit immediates
9602 case OP_COMPARE_IMM:
9603 case OP_LCOMPARE_IMM:
9604 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9605 return OP_AMD64_COMPARE_MEMBASE_IMM;
9607 case OP_ICOMPARE_IMM:
9608 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9609 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9613 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9614 return OP_AMD64_COMPARE_MEMBASE_REG;
9617 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9618 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the SECOND source register of 'opcode' into a
 * reg,[mem] form of the instruction (x86/amd64 only).  Each fold is gated
 * on the load width matching the operation width; non-foldable
 * combinations fall through (fall-through return elided in this excerpt).
 */
9627 op_to_op_src2_membase (int load_opcode, int opcode)
9630 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9636 return OP_X86_COMPARE_REG_MEMBASE;
9638 return OP_X86_ADD_REG_MEMBASE;
9640 return OP_X86_SUB_REG_MEMBASE;
9642 return OP_X86_AND_REG_MEMBASE;
9644 return OP_X86_OR_REG_MEMBASE;
9646 return OP_X86_XOR_REG_MEMBASE;
9653 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9654 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9658 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9659 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32-bit ALU ops fold 32-bit loads ... */
9662 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9663 return OP_X86_ADD_REG_MEMBASE;
9665 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9666 return OP_X86_SUB_REG_MEMBASE;
9668 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9669 return OP_X86_AND_REG_MEMBASE;
9671 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9672 return OP_X86_OR_REG_MEMBASE;
9674 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9675 return OP_X86_XOR_REG_MEMBASE;
/* ... and 64-bit ALU ops fold pointer/64-bit loads */
9677 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9678 return OP_AMD64_ADD_REG_MEMBASE;
9680 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9681 return OP_AMD64_SUB_REG_MEMBASE;
9683 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9684 return OP_AMD64_AND_REG_MEMBASE;
9686 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9687 return OP_AMD64_OR_REG_MEMBASE;
9689 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9690 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes which
 * this target emulates in software (long shifts on 32-bit targets, mul/div
 * when MONO_ARCH_EMULATE_* is defined) — presumably because the emulation
 * helpers want register operands; confirm against the helpers.  The
 * skipped-opcode case labels are elided in this excerpt.
 */
9698 mono_op_to_op_imm_noemul (int opcode)
9701 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPTS)
9706 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9714 return mono_op_to_op_imm (opcode);
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * compiler variable for them, demote variables used in only one bblock back
 * to plain local vregs, and finally compact cfg->varinfo / cfg->vars so the
 * later liveness computation is faster and smaller.
 *
 * FIX(review): the liveness scan below used to test SRC1/sreg1 in BOTH
 * disjuncts of the use check (copy-paste error); the second disjunct now
 * tests SRC2/sreg2, so uses through an instruction's second source register
 * are detected as well.
 */
9725 mono_handle_global_vregs (MonoCompile *cfg)
/* NOTE(review): sizeof (gint32) * (cfg->next_vreg + 1) looks like the intent;
 * sizeof (gint32*) merely over-allocates on 64 bit — confirm before changing. */
9731 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9733 /* Find local vregs used in more than one bb */
9734 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9735 MonoInst *ins = bb->code;
9736 int block_num = bb->block_num;
9738 if (cfg->verbose_level > 1)
9739 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9742 for (; ins; ins = ins->next) {
9743 const char *spec = INS_INFO (ins->opcode);
9744 int regtype, regindex;
9747 if (G_UNLIKELY (cfg->verbose_level > 1))
9748 mono_print_ins (ins);
/* only low-level (post-CEE) IR opcodes may appear at this stage */
9750 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg (0), sreg1 (1) and sreg2 (2) of each instruction */
9752 for (regindex = 0; regindex < 3; regindex ++) {
9755 if (regindex == 0) {
9756 regtype = spec [MONO_INST_DEST];
9760 } else if (regindex == 1) {
9761 regtype = spec [MONO_INST_SRC1];
9766 regtype = spec [MONO_INST_SRC2];
9772 #if SIZEOF_VOID_P == 4
9773 if (regtype == 'l') {
9775 * Since some instructions reference the original long vreg,
9776 * and some reference the two component vregs, it is quite hard
9777 * to determine when it needs to be global. So be conservative.
9779 if (!get_vreg_to_inst (cfg, vreg)) {
9780 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9782 if (cfg->verbose_level > 1)
9783 printf ("LONG VREG R%d made global.\n", vreg);
9786 * Make the component vregs volatile since the optimizations can
9787 * get confused otherwise.
9789 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9790 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9795 g_assert (vreg != -1);
9797 prev_bb = vreg_to_bb [vreg];
9799 /* 0 is a valid block num */
9800 vreg_to_bb [vreg] = block_num + 1;
9801 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are global by construction, skip them */
9802 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9805 if (!get_vreg_to_inst (cfg, vreg)) {
9806 if (G_UNLIKELY (cfg->verbose_level > 1))
9807 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9811 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9814 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9817 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9820 g_assert_not_reached ();
9824 /* Flag as having been used in more than one bb */
9825 vreg_to_bb [vreg] = -1;
9831 /* If a variable is used in only one bblock, convert it into a local vreg */
9832 for (i = 0; i < cfg->num_varinfo; i++) {
9833 MonoInst *var = cfg->varinfo [i];
9834 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9836 switch (var->type) {
9842 #if SIZEOF_VOID_P == 8
9845 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9846 /* Enabling this screws up the fp stack on x86 */
9849 /* Arguments are implicitly global */
9850 /* Putting R4 vars into registers doesn't work currently */
9851 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9853 * Make sure that the variable's liveness interval doesn't contain a call, since
9854 * that would cause the lvreg to be spilled, making the whole optimization
9857 /* This is too slow for JIT compilation */
9859 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9861 int def_index, call_index, ins_index;
9862 gboolean spilled = FALSE;
9867 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9868 const char *spec = INS_INFO (ins->opcode);
9870 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9871 def_index = ins_index;
/* FIXED: the second disjunct used to repeat the SRC1/sreg1 test */
9873 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9874 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
9875 if (call_index > def_index) {
9881 if (MONO_IS_CALL (ins))
9882 call_index = ins_index;
9892 if (G_UNLIKELY (cfg->verbose_level > 2))
9893 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
9894 var->flags |= MONO_INST_IS_DEAD;
9895 cfg->vreg_to_inst [var->dreg] = NULL;
9902 * Compress the varinfo and vars tables so the liveness computation is faster and
9903 * takes up less space.
9906 for (i = 0; i < cfg->num_varinfo; ++i) {
9907 MonoInst *var = cfg->varinfo [i];
9908 if (pos < i && cfg->locals_start == i)
9909 cfg->locals_start = pos;
9910 if (!(var->flags & MONO_INST_IS_DEAD)) {
9912 cfg->varinfo [pos] = cfg->varinfo [i];
9913 cfg->varinfo [pos]->inst_c0 = pos;
9914 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9915 cfg->vars [pos].idx = pos;
9916 #if SIZEOF_VOID_P == 4
9917 if (cfg->varinfo [pos]->type == STACK_I8) {
9918 /* Modify the two component vars too */
9921 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9922 var1->inst_c0 = pos;
9923 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9924 var1->inst_c0 = pos;
9931 cfg->num_varinfo = pos;
9932 if (cfg->locals_start > cfg->num_varinfo)
9933 cfg->locals_start = cfg->num_varinfo;
9937 * mono_spill_global_vars:
9939 * Generate spill code for variables which are not allocated to registers,
9940 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
9941 * code is generated which could be optimized by the local optimization passes.
9944 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
9949 guint32 *vreg_to_lvreg;
9951 guint32 i, lvregs_len;
9952 gboolean dest_has_lvreg = FALSE;
9953 guint32 stacktypes [128];
9955 *need_local_opts = FALSE;
9957 memset (spec2, 0, sizeof (spec2));
9959 /* FIXME: Move this function to mini.c */
/* Map the ins-spec regtype characters ('i'/'l'/'f') to stack types used
 * when allocating replacement dregs below. */
9960 stacktypes ['i'] = STACK_PTR;
9961 stacktypes ['l'] = STACK_I8;
9962 stacktypes ['f'] = STACK_R8;
9964 #if SIZEOF_VOID_P == 4
9965 /* Create MonoInsts for longs */
/* On 32-bit targets a long occupies two consecutive vregs: dreg + 1 gets
 * the LS word slot and dreg + 2 the MS word slot of the parent's frame
 * offset (see the MINI_LS/MS_WORD_OFFSET arithmetic below). */
9966 for (i = 0; i < cfg->num_varinfo; i++) {
9967 MonoInst *ins = cfg->varinfo [i];
9969 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
9970 switch (ins->type) {
9971 #ifdef MONO_ARCH_SOFT_FLOAT
9977 g_assert (ins->opcode == OP_REGOFFSET);
9979 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
9981 tree->opcode = OP_REGOFFSET;
9982 tree->inst_basereg = ins->inst_basereg;
9983 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
9985 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
9987 tree->opcode = OP_REGOFFSET;
9988 tree->inst_basereg = ins->inst_basereg;
9989 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
9999 /* FIXME: widening and truncation */
10002 * As an optimization, when a variable allocated to the stack is first loaded into
10003 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10004 * the variable again.
10006 orig_next_vreg = cfg->next_vreg;
/* vreg_to_lvreg: per-vreg cached lvreg (0 = none). lvregs[] records which
 * entries are live so they can be cleared without wiping the whole array
 * (done per-bblock and at calls below). Fixed capacity: 1024 entries. */
10007 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10008 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10011 /* Add spill loads/stores */
10012 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10015 if (cfg->verbose_level > 1)
10016 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10018 /* Clear vreg_to_lvreg array */
10019 for (i = 0; i < lvregs_len; i++)
10020 vreg_to_lvreg [lvregs [i]] = 0;
10024 MONO_BB_FOR_EACH_INS (bb, ins) {
10025 const char *spec = INS_INFO (ins->opcode);
10026 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10027 gboolean store, no_lvreg;
10029 if (G_UNLIKELY (cfg->verbose_level > 1))
10030 mono_print_ins (ins);
10032 if (ins->opcode == OP_NOP)
10036 * We handle LDADDR here as well, since it can only be decomposed
10037 * when variable addresses are known.
10039 if (ins->opcode == OP_LDADDR) {
10040 MonoInst *var = ins->inst_p0;
10042 if (var->opcode == OP_VTARG_ADDR) {
10043 /* Happens on SPARC/S390 where vtypes are passed by reference */
10044 MonoInst *vtaddr = var->inst_left;
10045 if (vtaddr->opcode == OP_REGVAR) {
10046 ins->opcode = OP_MOVE;
10047 ins->sreg1 = vtaddr->dreg;
/* NOTE(review): var->inst_left is the same object as vtaddr here;
 * using vtaddr in the condition would be clearer. */
10049 else if (var->inst_left->opcode == OP_REGOFFSET) {
10050 ins->opcode = OP_LOAD_MEMBASE;
10051 ins->inst_basereg = vtaddr->inst_basereg;
10052 ins->inst_offset = vtaddr->inst_offset;
10056 g_assert (var->opcode == OP_REGOFFSET);
/* Plain LDADDR of a stack variable: address = basereg + offset. */
10058 ins->opcode = OP_ADD_IMM;
10059 ins->sreg1 = var->inst_basereg;
10060 ins->inst_imm = var->inst_offset;
10063 *need_local_opts = TRUE;
10064 spec = INS_INFO (ins->opcode);
10067 if (ins->opcode < MONO_CEE_LAST) {
10068 mono_print_ins (ins);
10069 g_assert_not_reached ();
10073 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10077 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the code below can treat the stored
 * value like an ordinary source operand; a matching swap further
 * down restores the original layout. spec2 is the adjusted spec. */
10078 tmp_reg = ins->dreg;
10079 ins->dreg = ins->sreg2;
10080 ins->sreg2 = tmp_reg;
10083 spec2 [MONO_INST_DEST] = ' ';
10084 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10085 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10087 } else if (MONO_IS_STORE_MEMINDEX (ins))
10088 g_assert_not_reached ();
10093 if (G_UNLIKELY (cfg->verbose_level > 1))
10094 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* DREG handling: if the dest vreg maps to a global variable, either
 * rename to its hreg (OP_REGVAR) or spill to its stack slot. */
10099 regtype = spec [MONO_INST_DEST];
10100 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10103 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10104 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10105 MonoInst *store_ins;
10108 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10110 if (var->opcode == OP_REGVAR) {
10111 ins->dreg = var->dreg;
10112 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10114 * Instead of emitting a load+store, use a _membase opcode.
10116 g_assert (var->opcode == OP_REGOFFSET);
10117 if (ins->opcode == OP_MOVE) {
10120 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10121 ins->inst_basereg = var->inst_basereg;
10122 ins->inst_offset = var->inst_offset;
10125 spec = INS_INFO (ins->opcode);
10129 g_assert (var->opcode == OP_REGOFFSET);
10131 prev_dreg = ins->dreg;
10133 /* Invalidate any previous lvreg for this vreg */
10134 vreg_to_lvreg [ins->dreg] = 0;
10138 #ifdef MONO_ARCH_SOFT_FLOAT
10139 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values are stored as I8 bit patterns. */
10141 store_opcode = OP_STOREI8_MEMBASE_REG;
10145 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10147 if (regtype == 'l') {
/* Spill a long as two 32-bit word stores (LS = dreg + 1, MS = dreg + 2). */
10148 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10149 mono_bblock_insert_after_ins (bb, ins, store_ins);
10150 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10151 mono_bblock_insert_after_ins (bb, ins, store_ins);
10154 g_assert (store_opcode != OP_STOREV_MEMBASE);
10156 /* Try to fuse the store into the instruction itself */
10157 /* FIXME: Add more instructions */
10158 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10159 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10160 ins->inst_imm = ins->inst_c0;
10161 ins->inst_destbasereg = var->inst_basereg;
10162 ins->inst_offset = var->inst_offset;
10163 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A plain move becomes the store itself. */
10164 ins->opcode = store_opcode;
10165 ins->inst_destbasereg = var->inst_basereg;
10166 ins->inst_offset = var->inst_offset;
10170 tmp_reg = ins->dreg;
10171 ins->dreg = ins->sreg2;
10172 ins->sreg2 = tmp_reg;
10175 spec2 [MONO_INST_DEST] = ' ';
10176 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10177 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10179 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10180 // FIXME: The backends expect the base reg to be in inst_basereg
10181 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10183 ins->inst_basereg = var->inst_basereg;
10184 ins->inst_offset = var->inst_offset;
10185 spec = INS_INFO (ins->opcode);
10187 /* printf ("INS: "); mono_print_ins (ins); */
10188 /* Create a store instruction */
10189 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10191 /* Insert it after the instruction */
10192 mono_bblock_insert_after_ins (bb, ins, store_ins);
10195 * We can't assign ins->dreg to var->dreg here, since the
10196 * sregs could use it. So set a flag, and do it after
10199 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10200 dest_has_lvreg = TRUE;
/* SREG1/SREG2 handling: same idea as the dreg case, for both sources. */
10209 for (srcindex = 0; srcindex < 2; ++srcindex) {
10210 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10211 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10213 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10214 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10215 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10216 MonoInst *load_ins;
10217 guint32 load_opcode;
10219 if (var->opcode == OP_REGVAR) {
/* NOTE(review): the guards selecting sreg1 vs. sreg2 here are on
 * lines elided from this excerpt — presumably keyed on srcindex. */
10221 ins->sreg1 = var->dreg;
10223 ins->sreg2 = var->dreg;
10227 g_assert (var->opcode == OP_REGOFFSET);
10229 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10231 g_assert (load_opcode != OP_LOADV_MEMBASE);
10233 if (vreg_to_lvreg [sreg]) {
10234 /* The variable is already loaded to an lvreg */
10235 if (G_UNLIKELY (cfg->verbose_level > 1))
10236 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10238 ins->sreg1 = vreg_to_lvreg [sreg];
10240 ins->sreg2 = vreg_to_lvreg [sreg];
10244 /* Try to fuse the load into the instruction */
10245 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10246 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10247 ins->inst_basereg = var->inst_basereg;
10248 ins->inst_offset = var->inst_offset;
10249 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10250 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10251 ins->sreg2 = var->inst_basereg;
10252 ins->inst_offset = var->inst_offset;
10254 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
10255 ins->opcode = OP_NOP;
10258 //printf ("%d ", srcindex); mono_print_ins (ins);
10260 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10262 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10263 if (var->dreg == prev_dreg) {
10265 * sreg refers to the value loaded by the load
10266 * emitted below, but we need to use ins->dreg
10267 * since it refers to the store emitted earlier.
/* Cache the freshly loaded lvreg for later uses of this variable. */
10271 vreg_to_lvreg [var->dreg] = sreg;
/* lvregs[] has a fixed capacity of 1024 (see allocation above). */
10272 g_assert (lvregs_len < 1024);
10273 lvregs [lvregs_len ++] = var->dreg;
10282 if (regtype == 'l') {
/* Reload a long as two 32-bit word loads (MS = sreg + 2, LS = sreg + 1). */
10283 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10284 mono_bblock_insert_before_ins (bb, ins, load_ins);
10285 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10286 mono_bblock_insert_before_ins (bb, ins, load_ins);
10289 #if SIZEOF_VOID_P == 4
10290 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10292 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10293 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg pass above: now that the sregs have been
 * processed it is safe to record the dest lvreg in the cache. */
10299 if (dest_has_lvreg) {
10300 vreg_to_lvreg [prev_dreg] = ins->dreg;
10301 g_assert (lvregs_len < 1024);
10302 lvregs [lvregs_len ++] = prev_dreg;
10303 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
10307 tmp_reg = ins->dreg;
10308 ins->dreg = ins->sreg2;
10309 ins->sreg2 = tmp_reg;
/* Calls invalidate all cached lvregs (they may clobber the hregs). */
10312 if (MONO_IS_CALL (ins)) {
10313 /* Clear vreg_to_lvreg array */
10314 for (i = 0; i < lvregs_len; i++)
10315 vreg_to_lvreg [lvregs [i]] = 0;
10319 if (cfg->verbose_level > 1)
10320 mono_print_ins_index (1, ins);
10327 * - use 'iadd' instead of 'int_add'
10328 * - handling ovf opcodes: decompose in method_to_ir.
10329 * - unify iregs/fregs
10330 * -> partly done, the missing parts are:
10331 * - a more complete unification would involve unifying the hregs as well, so
10332 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10333 * would no longer map to the machine hregs, so the code generators would need to
10334 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10335 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10336 * fp/non-fp branches speeds it up by about 15%.
10337 * - use sext/zext opcodes instead of shifts
10339 * - get rid of TEMPLOADs if possible and use vregs instead
10340 * - clean up usage of OP_P/OP_ opcodes
10341 * - cleanup usage of DUMMY_USE
10342 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10344 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10345 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10346 * - make sure handle_stack_args () is called before the branch is emitted
10347 * - when the new IR is done, get rid of all unused stuff
10348 * - COMPARE/BEQ as separate instructions or unify them ?
10349 * - keeping them separate allows specialized compare instructions like
10350 * compare_imm, compare_membase
10351 * - most back ends unify fp compare+branch, fp compare+ceq
10352 * - integrate handle_stack_args into inline_method
10353 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10354 * - Things to backport to the old JIT:
10355 * - op_atomic_exchange fix for amd64
10356 * - localloc fix for amd64
10357 * - x86 type_token change
10359 * - long eq/ne optimizations
10360 * - handle long shift opts on 32 bit platforms somehow: they require
10361 * 3 sregs (2 for arg1 and 1 for arg2)
10362 * - make byref a 'normal' type.
10363 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10364 * variable if needed.
10365 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10366 * like inline_method.
10367 * - remove inlining restrictions
10368 * - remove mono_save_args.
10369 * - add 'introduce a new optimization to simplify some range checks'
10370 * - fix LNEG and enable cfold of INEG
10371 * - generalize x86 optimizations like ldelema as a peephole optimization
10372 * - add store_mem_imm for amd64
10373 * - optimize the loading of the interruption flag in the managed->native wrappers
10374 * - avoid special handling of OP_NOP in passes
10375 * - move code inserting instructions into one function/macro.
10376 * - cleanup the code replacement in decompose_long_opts ()
10377 * - try a coalescing phase after liveness analysis
10378 * - add float -> vreg conversion + local optimizations on !x86
10379 * - figure out how to handle decomposed branches during optimizations, ie.
10380 * compare+branch, op_jump_table+op_br etc.
10381 * - promote RuntimeXHandles to vregs
10382 * - vtype cleanups:
10383 * - add a NEW_VARLOADA_VREG macro
10384 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10385 * accessing vtype fields.
10386 * - get rid of I8CONST on 64 bit platforms
10387 * - dealing with the increase in code size due to branches created during opcode
10389 * - use extended basic blocks
10390 * - all parts of the JIT
10391 * - handle_global_vregs () && local regalloc
10392 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10393 * - sources of increase in code size:
10396 * - isinst and castclass
10397 * - lvregs not allocated to global registers even if used multiple times
10398 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10400 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10401 * - add all micro optimizations from the old JIT
10402 * - put tree optimizations into the deadce pass
10403 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10404 * specific function.
10405 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10406 * fcompare + branchCC.
10407 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10408 * running generics.exe.
10409 * - create a helper function for allocating a stack slot, taking into account
10410 * MONO_CFG_HAS_SPILLUP.
10411 * - merge new GC changes in mini.c.
10413 * - merge the ia64 switch changes.
10414 * - merge the mips conditional changes.
10415 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10416 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10417 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10418 * - optimize mono_regstate2_alloc_int/float.
10419 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10420 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10421 * parts of the tree could be separated by other instructions, killing the tree
10422 * arguments, or stores killing loads etc. Also, should we fold loads into other
10423 * instructions if the result of the load is used multiple times ?
10424 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10425 * - LAST MERGE: 108395.
10426 * - when returning vtypes in registers, generate IR and append it to the end of the
10427 * last bb instead of doing it in the epilog.
10428 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10429 * ones in inssel.h.
10430 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10438 - When to decompose opcodes:
10439 - earlier: this makes some optimizations hard to implement, since the low level IR
10440 no longer contains the necessary information. But it is easier to do.
10441 - later: harder to implement, enables more optimizations.
10442 - Branches inside bblocks:
10443 - created when decomposing complex opcodes.
10444 - branches to another bblock: harmless, but not tracked by the branch
10445 optimizations, so need to branch to a label at the start of the bblock.
10446 - branches to inside the same bblock: very problematic, trips up the local
10447 reg allocator. Can be fixed by splitting the current bblock, but that is a
10448 complex operation, since some local vregs can become global vregs etc.
10449 - Local/global vregs:
10450 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10451 local register allocator.
10452 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10453 structure, created by mono_create_var (). Assigned to hregs or the stack by
10454 the global register allocator.
10455 - When to do optimizations like alu->alu_imm:
10456 - earlier -> saves work later on since the IR will be smaller/simpler
10457 - later -> can work on more instructions
10458 - Handling of valuetypes:
10459 - When a vtype is pushed on the stack, a new temporary is created, an
10460 instruction computing its address (LDADDR) is emitted and pushed on
10461 the stack. Need to optimize cases when the vtype is used immediately as in
10462 argument passing, stloc etc.
10463 - Instead of the to_end stuff in the old JIT, simply call the function handling
10464 the values on the stack before emitting the last instruction of the bb.