#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-hwcap-x86.h>
+#include <mono/utils/mono-threads.h>
#include "trace.h"
#include "mini-x86.h"
/* Special case structs with only a float member */
if (info->num_fields == 1) {
- int ftype = mini_replace_type (info->fields [0].field->type)->type;
+ int ftype = mini_type_get_underlying_type (gsctx, info->fields [0].field->type)->type;
if ((info->native_size == 8) && (ftype == MONO_TYPE_R8)) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ArgOnDoubleFpStack;
{
ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
switch (ret_type->type) {
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
}
ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
switch (ptype->type) {
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
add_general (&gr, param_regs, &stack_size, ainfo);
break;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
add_general (&gr, param_regs, &stack_size, ainfo);
break;
case MONO_TYPE_I4:
add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie);
}
+ if (cinfo->vtype_retaddr) {
+ /* if the function returns a struct on stack, the called method already does a ret $0x4 */
+ cinfo->callee_stack_pop = 4;
+ } else if (CALLCONV_IS_STDCALL (sig) && sig->pinvoke) {
+ /* Have to compensate for the stack space popped by the native callee */
+ cinfo->callee_stack_pop = stack_size;
+ }
+
if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) {
cinfo->need_stack_align = TRUE;
cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT);
stack_size += cinfo->stack_align_amount;
}
- if (cinfo->vtype_retaddr) {
- /* if the function returns a struct on stack, the called method already does a ret $0x4 */
- cinfo->callee_stack_pop = 4;
- }
-
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
cinfo->freg_usage = fr;
* the extra stack space would be left on the stack after the tail call.
*/
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = mini_replace_type (callee_sig->ret);
+ callee_ret = mini_get_underlying_type (cfg, callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != ArgValuetypeInReg)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
sig = mono_method_signature (cfg->method);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
call->call_info = cinfo;
gboolean
mono_x86_have_tls_get (void)
{
-#ifdef __APPLE__
+#ifdef TARGET_MACH
static gboolean have_tls_get = FALSE;
static gboolean inited = FALSE;
guint32 *ins;
cpos = bb->max_offset;
- if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
+ if ((cfg->prof_options & MONO_PROFILE_COVERAGE) && cfg->coverage_info) {
MonoProfileCoverageInfo *cov = cfg->coverage_info;
g_assert (!cfg->compile_aot);
cpos += 6;
x86_ret (code);
break;
}
+ case OP_GET_EX_OBJ:
+ if (ins->dreg != X86_EAX)
+ x86_mov_reg_reg (code, ins->dreg, X86_EAX, sizeof (gpointer));
+ break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
case OP_FMOVE:
/* Not needed on the fp stack */
break;
- case OP_MOVE_R4_TO_I4:
- x86_push_reg (code, X86_EAX);
- x86_fist_pop_membase (code, X86_ESP, 0, FALSE);
- x86_pop_reg (code, ins->dreg);
+ case OP_MOVE_F_TO_I4:
+ x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
+ x86_mov_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, 4);
break;
- case OP_MOVE_I4_TO_R4:
- x86_push_reg (code, ins->sreg1);
- x86_fild_membase (code, X86_ESP, 0, FALSE);
- x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
+ case OP_MOVE_I4_TO_F:
+ x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
+ x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
break;
case OP_FADD:
x86_fp_op_reg (code, X86_FADD, 1, TRUE);
case OP_XZERO:
x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg);
break;
- case OP_ICONV_TO_R8_RAW:
- x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
- x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
- break;
case OP_FCONV_TO_R8_X:
x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
- case OP_NACL_GC_SAFE_POINT: {
-#if defined(__native_client_codegen__) && defined(__native_client_gc__)
- if (cfg->compile_aot)
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc);
- else {
- guint8 *br [1];
-
- x86_test_mem_imm8 (code, (gpointer)&__nacl_thread_suspension_needed, 0xFFFFFFFF);
- br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc);
- x86_patch (br[0], code);
- }
+ case OP_GC_SAFE_POINT: {
+ const char *polling_func = NULL;
+ int compare_val = 0;
+ guint8 *br [1];
+
+#if defined (USE_COOP_GC)
+ polling_func = "mono_threads_state_poll";
+ compare_val = 1;
+#elif defined(__native_client_codegen__) && defined(__native_client_gc__)
+ polling_func = "mono_nacl_gc";
+ compare_val = 0xFFFFFFFF;
#endif
+ if (!polling_func)
+ break;
+
+ x86_test_membase_imm (code, ins->sreg1, 0, compare_val);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func);
+ x86_patch (br [0], code);
+
break;
}
case OP_GC_LIVENESS_DEF:
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
+mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
- MonoJumpInfo *patch_info;
- gboolean compile_aot = !run_cctors;
+ unsigned char *ip = ji->ip.i + code;
- for (patch_info = ji; patch_info; patch_info = patch_info->next) {
- unsigned char *ip = patch_info->ip.i + code;
- const unsigned char *target;
-
- if (compile_aot) {
- switch (patch_info->type) {
- case MONO_PATCH_INFO_BB:
- case MONO_PATCH_INFO_LABEL:
- break;
- default:
- /* No need to patch these */
- continue;
- }
+ switch (ji->type) {
+ case MONO_PATCH_INFO_IP:
+ *((gconstpointer *)(ip)) = target;
+ break;
+ case MONO_PATCH_INFO_CLASS_INIT: {
+ guint8 *code = ip;
+ /* Might already have been changed to a nop */
+ x86_call_code (code, 0);
+ x86_patch (ip, (unsigned char*)target);
+ break;
+ }
+ case MONO_PATCH_INFO_ABS:
+ case MONO_PATCH_INFO_METHOD:
+ case MONO_PATCH_INFO_METHOD_JUMP:
+ case MONO_PATCH_INFO_INTERNAL_METHOD:
+ case MONO_PATCH_INFO_BB:
+ case MONO_PATCH_INFO_LABEL:
+ case MONO_PATCH_INFO_RGCTX_FETCH:
+ case MONO_PATCH_INFO_MONITOR_ENTER:
+ case MONO_PATCH_INFO_MONITOR_ENTER_V4:
+ case MONO_PATCH_INFO_MONITOR_EXIT:
+ case MONO_PATCH_INFO_JIT_ICALL_ADDR:
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (nacl_is_code_address (code)) {
+ /* For tail calls, code is patched after being installed */
+ /* but not through the normal "patch callsite" method. */
+ unsigned char buf[kNaClAlignment];
+ unsigned char *aligned_code = (uintptr_t)code & ~kNaClAlignmentMask;
+ unsigned char *_target = target;
+ int ret;
+ /* All patch targets modified in x86_patch */
+ /* are IP relative. */
+ _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
+ memcpy (buf, aligned_code, kNaClAlignment);
+ /* Patch a temp buffer of bundle size, */
+ /* then install to actual location. */
+ x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
+ ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+ g_assert (ret == 0);
}
-
- target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
-
- switch (patch_info->type) {
- case MONO_PATCH_INFO_IP:
- *((gconstpointer *)(ip)) = target;
- break;
- case MONO_PATCH_INFO_CLASS_INIT: {
- guint8 *code = ip;
- /* Might already been changed to a nop */
- x86_call_code (code, 0);
- x86_patch (ip, target);
- break;
+ else {
+ x86_patch (ip, (unsigned char*)target);
}
- case MONO_PATCH_INFO_ABS:
- case MONO_PATCH_INFO_METHOD:
- case MONO_PATCH_INFO_METHOD_JUMP:
- case MONO_PATCH_INFO_INTERNAL_METHOD:
- case MONO_PATCH_INFO_BB:
- case MONO_PATCH_INFO_LABEL:
- case MONO_PATCH_INFO_RGCTX_FETCH:
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
- case MONO_PATCH_INFO_MONITOR_ENTER:
- case MONO_PATCH_INFO_MONITOR_ENTER_V4:
- case MONO_PATCH_INFO_MONITOR_EXIT:
- case MONO_PATCH_INFO_JIT_ICALL_ADDR:
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (nacl_is_code_address (code)) {
- /* For tail calls, code is patched after being installed */
- /* but not through the normal "patch callsite" method. */
- unsigned char buf[kNaClAlignment];
- unsigned char *aligned_code = (uintptr_t)code & ~kNaClAlignmentMask;
- unsigned char *_target = target;
- int ret;
- /* All patch targets modified in x86_patch */
- /* are IP relative. */
- _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
- memcpy (buf, aligned_code, kNaClAlignment);
- /* Patch a temp buffer of bundle size, */
- /* then install to actual location. */
- x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
- ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
- g_assert (ret == 0);
- }
- else {
- x86_patch (ip, target);
- }
#else
- x86_patch (ip, target);
+ x86_patch (ip, (unsigned char*)target);
#endif
- break;
- case MONO_PATCH_INFO_NONE:
- break;
- case MONO_PATCH_INFO_R4:
- case MONO_PATCH_INFO_R8: {
- guint32 offset = mono_arch_get_patch_offset (ip);
- *((gconstpointer *)(ip + offset)) = target;
- break;
- }
- default: {
- guint32 offset = mono_arch_get_patch_offset (ip);
+ break;
+ case MONO_PATCH_INFO_NONE:
+ break;
+ case MONO_PATCH_INFO_R4:
+ case MONO_PATCH_INFO_R8: {
+ guint32 offset = mono_arch_get_patch_offset (ip);
+ *((gconstpointer *)(ip + offset)) = target;
+ break;
+ }
+ default: {
+ guint32 offset = mono_arch_get_patch_offset (ip);
#if !defined(__native_client__)
- *((gconstpointer *)(ip + offset)) = target;
+ *((gconstpointer *)(ip + offset)) = target;
#else
- *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
+ *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
#endif
- break;
- }
- }
+ break;
+ }
}
}
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
- int i;
- gboolean can_write = TRUE;
/*
* If method_start is non-NULL we need to perform bound checks, since we access memory
* at code - offset we could go before the start of the method and end up in a different
memset (buf, 0, size);
memcpy (buf + offset - diff, method_start, diff + size - offset);
}
- code -= offset;
- for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
- int idx = mono_breakpoint_info_index [i];
- guint8 *ptr;
- if (idx < 1)
- continue;
- ptr = mono_breakpoint_info [idx].address;
- if (ptr >= code && ptr < code + size) {
- guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
- can_write = FALSE;
- /*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
- buf [ptr - code] = saved_byte;
- }
- }
- return can_write;
+ return TRUE;
}
/*