/* Special case structs with only a float member */
if (info->num_fields == 1) {
- int ftype = mini_replace_type (info->fields [0].field->type)->type;
+ int ftype = mini_type_get_underlying_type (gsctx, info->fields [0].field->type)->type;
if ((info->native_size == 8) && (ftype == MONO_TYPE_R8)) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ArgOnDoubleFpStack;
{
ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
switch (ret_type->type) {
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
}
ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
switch (ptype->type) {
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
add_general (&gr, param_regs, &stack_size, ainfo);
break;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
add_general (&gr, param_regs, &stack_size, ainfo);
break;
case MONO_TYPE_I4:
add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie);
}
+ if (cinfo->vtype_retaddr) {
+ /* if the function returns a struct on stack, the called method already does a ret $0x4 */
+ cinfo->callee_stack_pop = 4;
+ } else if (CALLCONV_IS_STDCALL (sig) && sig->pinvoke) {
+ /* Have to compensate for the stack space popped by the native callee */
+ cinfo->callee_stack_pop = stack_size;
+ }
+
if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) {
cinfo->need_stack_align = TRUE;
cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT);
stack_size += cinfo->stack_align_amount;
}
- if (cinfo->vtype_retaddr) {
- /* if the function returns a struct on stack, the called method already does a ret $0x4 */
- cinfo->callee_stack_pop = 4;
- }
-
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
cinfo->freg_usage = fr;
* the extra stack space would be left on the stack after the tail call.
*/
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = mini_replace_type (callee_sig->ret);
+ callee_ret = mini_get_underlying_type (cfg, callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != ArgValuetypeInReg)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
sig = mono_method_signature (cfg->method);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
call->call_info = cinfo;
gboolean
mono_x86_have_tls_get (void)
{
-#ifdef __APPLE__
+#ifdef TARGET_MACH
static gboolean have_tls_get = FALSE;
static gboolean inited = FALSE;
guint32 *ins;
cpos = bb->max_offset;
- if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
+ if ((cfg->prof_options & MONO_PROFILE_COVERAGE) && cfg->coverage_info) {
MonoProfileCoverageInfo *cov = cfg->coverage_info;
g_assert (!cfg->compile_aot);
cpos += 6;
x86_ret (code);
break;
}
+ case OP_GET_EX_OBJ:
+ if (ins->dreg != X86_EAX)
+ x86_mov_reg_reg (code, ins->dreg, X86_EAX, sizeof (gpointer));
+ break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
/* Not needed on the fp stack */
break;
case OP_MOVE_F_TO_I4:
- x86_push_reg (code, X86_EAX);
- x86_fist_pop_membase (code, X86_ESP, 0, FALSE);
- x86_pop_reg (code, ins->dreg);
+ x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
+ x86_mov_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, 4);
break;
case OP_MOVE_I4_TO_F:
- x86_push_reg (code, ins->sreg1);
- x86_fild_membase (code, X86_ESP, 0, FALSE);
- x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
+ x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
+ x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
break;
case OP_FADD:
x86_fp_op_reg (code, X86_FADD, 1, TRUE);
break;
}
case OP_ATOMIC_EXCHANGE_I4: {
- /* LOCK prefix is implied. */
- x86_xchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2, 4);
- x86_mov_reg_reg (code, ins->dreg, ins->sreg2, 4);
+ guchar *br[2];
+ int sreg2 = ins->sreg2;
+ int breg = ins->inst_basereg;
+
+ g_assert (cfg->has_atomic_exchange_i4);
+
+ /* cmpxchg uses eax as comparand, need to make sure we can use it
+ * hack to overcome limits in x86 reg allocator
+ * (req: dreg == eax and sreg2 != eax and breg != eax)
+ */
+ g_assert (ins->dreg == X86_EAX);
+
+ /* We need the EAX reg for the cmpxchg */
+ if (ins->sreg2 == X86_EAX) {
+ sreg2 = (breg == X86_EDX) ? X86_EBX : X86_EDX;
+ x86_push_reg (code, sreg2);
+ x86_mov_reg_reg (code, sreg2, X86_EAX, 4);
+ }
+
+ if (breg == X86_EAX) {
+ breg = (sreg2 == X86_ESI) ? X86_EDI : X86_ESI;
+ x86_push_reg (code, breg);
+ x86_mov_reg_reg (code, breg, X86_EAX, 4);
+ }
+
+ x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4);
+
+ br [0] = code; x86_prefix (code, X86_LOCK_PREFIX);
+ x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
+ br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
+ x86_patch (br [1], br [0]);
+
+ if (breg != ins->inst_basereg)
+ x86_pop_reg (code, breg);
+
+ if (ins->sreg2 != sreg2)
+ x86_pop_reg (code, sreg2);
+
break;
}
case OP_ATOMIC_CAS_I4: {
case OP_XZERO:
x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg);
break;
- case OP_ICONV_TO_R8_RAW:
- x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
- x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
- break;
case OP_FCONV_TO_R8_X:
x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
- int i;
- gboolean can_write = TRUE;
/*
* If method_start is non-NULL we need to perform bound checks, since we access memory
* at code - offset we could go before the start of the method and end up in a different
memset (buf, 0, size);
memcpy (buf + offset - diff, method_start, diff + size - offset);
}
- code -= offset;
- for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
- int idx = mono_breakpoint_info_index [i];
- guint8 *ptr;
- if (idx < 1)
- continue;
- ptr = mono_breakpoint_info [idx].address;
- if (ptr >= code && ptr < code + size) {
- guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
- can_write = FALSE;
- /*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
- buf [ptr - code] = saved_byte;
- }
- }
- return can_write;
+ return TRUE;
}
/*