- MONO_NACL_ALIGN_MASK_OFF=1
- AC_DEFINE(TARGET_NACL, 1, [...])
- AC_DEFINE(__native_client_codegen__, 1, [...])
-else
- MONO_NACL_ALIGN_MASK_OFF=0
- AC_DEFINE(__default_codegen__, 1, [...])
fi
if test "x$enable_nacl_gc" = "xyes"; then
if test "x$TARGET" = "xAMD64" -o "x$TARGET" = "xX86"; then
fi
fi
-if test ${TARGET} = ARM; then
- if test "x${with_jumptables}" = "xyes"; then
- AC_DEFINE(USE_JUMP_TABLES, 1, Use jump tables in JIT)
- fi
-fi
-
if test ${TARGET} = unknown; then
CPPFLAGS="$CPPFLAGS -DNO_PORT"
AC_MSG_WARN("mono has not been ported to $host: some things may not work.")
AMD64_REX_W = 8 /* Operation is 64 bits instead of 32 (default) or 16 (with 0x66 prefix) */
} AMD64_REX_Bits;
-#if defined(__default_codegen__)
-
#define amd64_codegen_pre(inst)
#define amd64_codegen_post(inst)
-#elif defined(__native_client_codegen__)
-
-#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
-#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
-
-/* Because of rex prefixes, etc, call sequences are not constant size. */
-/* These pre- and post-sequence hooks remedy this by aligning the call */
-/* sequence after we emit it, since we will know the exact size then. */
-#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
-#define amd64_call_sequence_post(inst) \
- (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
-
-/* Native client can load/store using one of the following registers */
-/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
-/* its upper 32 bits cleared and reference memory using r15 as the base. */
-#define amd64_is_valid_nacl_base(reg) \
- ((reg) == AMD64_RIP || (reg) == AMD64_R15 || \
- (reg) == AMD64_RBP || (reg) == AMD64_RSP)
-
-#endif /*__native_client_codegen__*/
-
#ifdef TARGET_WIN32
#define AMD64_ARG_REG1 AMD64_RCX
#define AMD64_ARG_REG2 AMD64_RDX
#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
-#elif defined(__native_client_codegen__)
-/* AMD64 Native Client code may not write R15 */
-#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
-#define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg)))
-
-#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
-#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
-
-#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_RBP))
-#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
#else
#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
#define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg)))
#endif
#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits)))
-#if defined(__default_codegen__)
-#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
- { \
- unsigned char _amd64_rex_bits = \
- (((width) > 4) ? AMD64_REX_W : 0) | \
- (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
- (((reg_index) > 7) ? AMD64_REX_X : 0) | \
- (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
- if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
- } while (0)
-#elif defined(__native_client_codegen__)
#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
{ \
unsigned char _amd64_rex_bits = \
(((width) > 4) ? AMD64_REX_W : 0) | \
(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
(((reg_index) > 7) ? AMD64_REX_X : 0) | \
(((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
- amd64_nacl_tag_rex((inst)); \
if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
} while (0)
-#endif
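/* For reference, the REX prefix byte is 0x40 | W<<3 | R<<2 | X<<1 | B. A 64-bit
 * "mov %rax, %r8" (opcode 0x89 with RAX in the ModRM reg field and R8 in the
 * r/m field) needs REX.W for the 64-bit operand size and REX.B for the extended
 * r/m register, so amd64_emit_rex emits AMD64_REX(AMD64_REX_W | AMD64_REX_B) =
 * 0x49 and the instruction encodes as 49 89 c0. */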
typedef union {
guint64 val;
amd64_codegen_post(inst); \
} while (0)
-#if defined(__default_codegen__)
-
#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size))
#define amd64_test_reg_imm_size(inst, reg, imm, size) \
amd64_test_reg_imm_size_body(inst, reg, imm, size)
-#elif defined(__native_client_codegen__)
-/* NaCl modules may not directly update RSP or RBP other than direct copies */
-/* between them. Instead the lower 4 bytes are updated and then added to R15 */
-#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP))
-
-#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
- do{ \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg(reg)) { \
- if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \
- g_assert_not_reached(); \
- amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \
- /* Use LEA instead of ADD to preserve flags */ \
- amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
- } else { \
- amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \
- } \
- amd64_codegen_post(inst); \
- } while(0)
-
-#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
- do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \
- if (((opc) != X86_ADD && (opc) != X86_SUB)) \
- g_assert_not_reached(); \
- amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \
- /* Use LEA instead of ADD to preserve flags */ \
- amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \
- } else { \
- amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \
- } \
- amd64_codegen_post(inst); \
- } while (0)
-
-#endif /*__native_client_codegen__*/
-
#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
amd64_codegen_post(inst); \
} while (0)
-#if defined(__default_codegen__)
#define amd64_mov_reg_mem(inst,reg,mem,size) \
do { \
amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \
} while (0)
-#elif defined(__native_client_codegen__)
-/* We have to re-base memory reads because memory isn't zero based. */
-#define amd64_mov_reg_mem(inst,reg,mem,size) \
- do { \
- amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \
- } while (0)
-#endif /* __native_client_codegen__ */
#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \
do { \
x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \
} while (0)
-#if defined(__default_codegen__)
-
#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size))
#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \
amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
} while (0)
-#elif defined(__native_client_codegen__)
-
-#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
- do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg((reg))) { \
- /* Clear upper 32 bits with mov of size 4 */ \
- amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \
- /* Add %r15 using LEA to preserve flags */ \
- amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
- } else { \
- amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \
- } \
- amd64_codegen_post(inst); \
- } while(0)
-
-#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \
- do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg((reg))) { \
- /* Clear upper 32 bits with mov of size 4 */ \
- amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \
- /* Add %r15 */ \
- amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
- } else { \
- amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
- } \
- amd64_codegen_post(inst); \
- } while (0)
-
-#endif /*__native_client_codegen__*/
-
#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \
do { \
amd64_codegen_pre(inst); \
amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
} while (0)
-#if defined(__default_codegen__)
#define amd64_lea_membase(inst,reg,basereg,disp) \
amd64_lea_membase_body((inst), (reg), (basereg), (disp))
-#elif defined(__native_client_codegen__)
-/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
-/* 32-bit LEA and add R15 to the effective address */
-#define amd64_lea_membase(inst,reg,basereg,disp) \
- do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg(reg)) { \
- /* 32-bit LEA */ \
- amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \
- *(inst)++ = (unsigned char)0x8d; \
- amd64_membase_emit((inst), (reg), (basereg), (disp)); \
- /* Use a 64-bit LEA instead of an ADD to preserve flags */ \
- amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
- } else { \
- amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \
- } \
- amd64_codegen_post(inst); \
- } while (0)
-#endif /*__native_client_codegen__*/
/* Instructions are implicitly 64-bit, so don't generate REX just for the size. */
#define amd64_push_reg(inst,reg) \
amd64_codegen_post(inst); \
} while (0)
-#if defined(__default_codegen__)
-
#define amd64_call_reg(inst,reg) \
do { \
amd64_emit_rex(inst, 0, 0, 0, (reg)); \
#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg))
-#elif defined(__native_client_codegen__)
-
-/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_jump_reg_size(inst,reg,size) \
- do { \
- amd64_codegen_pre((inst)); \
- amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
- amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
- amd64_emit_rex ((inst),0,0,0,(reg)); \
- x86_jump_reg((inst),((reg)&0x7)); \
- amd64_codegen_post((inst)); \
- } while (0)
-
-/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_jump_mem_size(inst,mem,size) \
- do { \
- amd64_codegen_pre((inst)); \
- amd64_mov_reg_mem((inst), (mem), AMD64_R11, 4); \
- amd64_jump_reg_size((inst), AMD64_R11, 4); \
- amd64_codegen_post((inst)); \
- } while (0)
-
-#define amd64_call_reg_internal(inst,reg) \
- do { \
- amd64_codegen_pre((inst)); \
- amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
- amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
- amd64_emit_rex((inst), 0, 0, 0, (reg)); \
- x86_call_reg((inst), ((reg) & 0x7)); \
- amd64_codegen_post((inst)); \
- } while (0)
-
-#define amd64_call_reg(inst,reg) \
- do { \
- amd64_codegen_pre((inst)); \
- amd64_call_sequence_pre(inst); \
- amd64_call_reg_internal((inst), (reg)); \
- amd64_call_sequence_post(inst); \
- amd64_codegen_post((inst)); \
- } while (0)
-
-
-#define amd64_ret(inst) \
- do { \
- amd64_codegen_pre(inst); \
- amd64_pop_reg_body((inst), AMD64_R11); \
- amd64_jump_reg_size((inst), AMD64_R11, 8); \
- amd64_codegen_post(inst); \
- } while (0)
-
-#define amd64_leave(inst) \
- do { \
- amd64_codegen_pre(inst); \
- amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \
- amd64_pop_reg_body((inst), AMD64_R11); \
- amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \
- amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \
- amd64_codegen_post(inst); \
- } while (0)
-
-#define amd64_pop_reg(inst,reg) \
- do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg((reg))) { \
- amd64_pop_reg_body((inst), AMD64_R11); \
- amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \
- amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
- } else { \
- amd64_pop_reg_body((inst), (reg)); \
- } \
- amd64_codegen_post(inst); \
- } while (0)
-
-#endif /*__native_client_codegen__*/
-
#define amd64_movsd_reg_regp(inst,reg,regp) \
do { \
amd64_codegen_pre(inst); \
amd64_codegen_post(inst); \
} while (0)
-#if defined (__default_codegen__)
-
/* From the AMD64 Software Optimization Manual */
#define amd64_padding_size(inst,size) \
do { \
} \
} while (0)
-#elif defined(__native_client_codegen__)
-
-/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */
-/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
-/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
-/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
-/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
-
-#define amd64_padding_size(inst,size) \
- do { \
- unsigned char *code_start = (inst); \
- switch ((size)) { \
- /* xchg %eax,%eax, recognized by hardware as a NOP */ \
- case 1: *(inst)++ = 0x90; break; \
- /* xchg %ax,%ax */ \
- case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \
- break; \
- /* nop (%rax) */ \
- case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- *(inst)++ = 0x00; \
- break; \
- /* nop 0x0(%rax) */ \
- case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 1, 0, AMD64_RAX); \
- x86_imm_emit8 ((inst), 0); \
- break; \
- /* nop 0x0(%rax,%rax) */ \
- case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 1, 0, 4); \
- x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
- x86_imm_emit8 ((inst), 0); \
- break; \
- /* nopw 0x0(%rax,%rax) */ \
- case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
- *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 1, 0, 4); \
- x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
- x86_imm_emit8 ((inst), 0); \
- break; \
- /* nop 0x0(%rax) (32-bit displacement) */ \
- case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 2, 0, AMD64_RAX); \
- x86_imm_emit32((inst), 0); \
- break; \
- /* nop 0x0(%rax,%rax) (32-bit displacement) */ \
- case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 2, 0, 4); \
- x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
- x86_imm_emit32 ((inst), 0); \
- break; \
- default: \
- g_assert_not_reached(); \
- } \
- g_assert(code_start + (size) == (unsigned char *)(inst)); \
- } while (0)
-
-
-/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
-#define amd64_call_membase_size(inst,basereg,disp,size) \
- do { \
- amd64_codegen_pre((inst)); \
- amd64_call_sequence_pre(inst); \
- amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
- amd64_call_reg_internal((inst), AMD64_R11); \
- amd64_call_sequence_post(inst); \
- amd64_codegen_post((inst)); \
- } while (0)
-
-/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_jump_membase_size(inst,basereg,disp,size) \
- do { \
- amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
- amd64_jump_reg_size((inst), AMD64_R11, 4); \
- } while (0)
-
-/* On Native Client we can't jump more than INT_MAX in either direction */
-#define amd64_jump_code_size(inst,target,size) \
- do { \
- /* x86_jump_code used twice in case of */ \
- /* relocation by amd64_codegen_post */ \
- guint8* jump_start; \
- amd64_codegen_pre(inst); \
- assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \
- x86_jump_code((inst),(target)); \
- inst = amd64_codegen_post(inst); \
- jump_start = (inst); \
- x86_jump_code((inst),(target)); \
- mono_amd64_patch(jump_start, (target)); \
-} while (0)
-
-#endif /*__native_client_codegen__*/
-
/*
* SSE
*/
#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0)
#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0)
#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#if !defined( __native_client_codegen__ )
-/* Defined above for Native Client, so they can be used in other macros */
#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0)
#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
-#endif
#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0)
#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0)
-#if defined(__default_codegen__)
#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0)
-#elif defined(__native_client_codegen__)
-#define amd64_branch_size(inst,cond,target,is_signed,size) \
- do { \
- /* amd64_branch_size_body used twice in */ \
- /* case of relocation by amd64_codegen_post */ \
- guint8* branch_start; \
- amd64_codegen_pre(inst); \
- amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
- inst = amd64_codegen_post(inst); \
- branch_start = inst; \
- amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
- mono_amd64_patch(branch_start, (target)); \
- } while (0)
-#endif
#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#if defined(__default_codegen__)
-
#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0)
#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0)
-#elif defined(__native_client_codegen__)
-/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
-#define amd64_call_imm_size(inst,disp,size) \
- do { \
- amd64_codegen_pre((inst)); \
- amd64_call_sequence_pre((inst)); \
- x86_call_imm((inst),(disp)); \
- amd64_call_sequence_post((inst)); \
- amd64_codegen_post((inst)); \
- } while (0)
-
-/* x86_call_code is called twice below, first so we can get the size of the */
-/* call sequence, and again so the exact offset from "inst" is used, since */
-/* the sequence could have moved from amd64_call_sequence_post. */
-/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_call_code_size(inst,target,size) \
- do { \
- amd64_codegen_pre((inst)); \
- guint8* adjusted_start; \
- guint8* call_start; \
- amd64_call_sequence_pre((inst)); \
- x86_call_code((inst),(target)); \
- adjusted_start = amd64_call_sequence_post((inst)); \
- call_start = adjusted_start; \
- x86_call_code(adjusted_start, (target)); \
- amd64_codegen_post((inst)); \
- mono_amd64_patch(call_start, (target)); \
- } while (0)
-
-#endif /*__native_client_codegen__*/
-
//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0)
#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs)
-#ifdef __native_client_codegen__
-#define ARM_DBRK(p) ARM_EMIT(p, 0xE7FEDEF0)
-#else
#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010)
-#endif
#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010)
#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1)
#define X86_H
#include <assert.h>
-#ifdef __native_client_codegen__
-extern gint8 nacl_align_byte;
-#endif /* __native_client_codegen__ */
-
-
-#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
-#define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0)
-#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
-#define x86_call_sequence_post_val(inst) \
- (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
-#define x86_call_sequence_pre(inst) x86_call_sequence_pre_val((inst))
-#define x86_call_sequence_post(inst) x86_call_sequence_post_val((inst))
-#else
#define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0)
/* Two variants are needed to avoid warnings */
#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
#define x86_call_sequence_post_val(inst) _code_start
#define x86_call_sequence_pre(inst)
#define x86_call_sequence_post(inst)
-#endif /* __native_client_codegen__ */
-
/*
// x86 register numbers
} \
} while (0)
-#if defined(__native_client_codegen__) && defined(TARGET_AMD64)
-#define x86_membase_emit(inst,r,basereg,disp) \
- do { \
- amd64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \
- } while (0)
-#else /* __default_codegen__ || 32-bit NaCl codegen */
#define x86_membase_emit(inst,r,basereg,disp) \
do { \
x86_membase_emit_body((inst),(r),(basereg),(disp)); \
} while (0)
-#endif
#define kMaxMemindexEmitPadding 6
else assert (0); \
} while (0)
-#if defined( __native_client_codegen__ ) && defined(TARGET_X86)
-
-#define x86_skip_nops(inst) \
- do { \
- int in_nop = 0; \
- do { \
- in_nop = 0; \
- if (inst[0] == 0x90) { \
- in_nop = 1; \
- inst += 1; \
- } \
- if (inst[0] == 0x8b && inst[1] == 0xc0) { \
- in_nop = 1; \
- inst += 2; \
- } \
- if (inst[0] == 0x8d && inst[1] == 0x6d \
- && inst[2] == 0x00) { \
- in_nop = 1; \
- inst += 3; \
- } \
- if (inst[0] == 0x8d && inst[1] == 0x64 \
- && inst[2] == 0x24 && inst[3] == 0x00) { \
- in_nop = 1; \
- inst += 4; \
- } \
- /* skip inst+=5 case because it's the 4-byte + 1-byte case */ \
- if (inst[0] == 0x8d && inst[1] == 0xad \
- && inst[2] == 0x00 && inst[3] == 0x00 \
- && inst[4] == 0x00 && inst[5] == 0x00) { \
- in_nop = 1; \
- inst += 6; \
- } \
- if (inst[0] == 0x8d && inst[1] == 0xa4 \
- && inst[2] == 0x24 && inst[3] == 0x00 \
- && inst[4] == 0x00 && inst[5] == 0x00 \
- && inst[6] == 0x00 ) { \
- in_nop = 1; \
- inst += 7; \
- } \
- } while ( in_nop ); \
- } while (0)
-
-#if defined(__native_client__)
-#define x86_patch(ins,target) \
- do { \
- unsigned char* inst = (ins); \
- guint8* new_target = nacl_modify_patch_target((target)); \
- x86_skip_nops((inst)); \
- x86_do_patch((inst), new_target); \
- } while (0)
-#else /* __native_client__ */
-#define x86_patch(ins,target) \
- do { \
- unsigned char* inst = (ins); \
- guint8* new_target = (target); \
- x86_skip_nops((inst)); \
- x86_do_patch((inst), new_target); \
- } while (0)
-#endif /* __native_client__ */
-
-#else
#define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0)
-#endif /* __native_client_codegen__ */
-#ifdef __native_client_codegen__
-/* The breakpoint instruction is illegal in Native Client, although the HALT */
-/* instruction is allowed. The breakpoint is used several places in mini-x86.c */
-/* and exceptions-x86.c. */
-#define x86_breakpoint(inst) \
- do { \
- *(inst)++ = 0xf4; \
- } while (0)
-#else
#define x86_breakpoint(inst) \
do { \
*(inst)++ = 0xcc; \
} while (0)
-#endif
#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0)
#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0)
#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0)
#define x86_movsd(inst) x86_movsl((inst))
-#if defined(__default_codegen__)
#define x86_prefix(inst,p) \
do { \
*(inst)++ =(unsigned char) (p); \
} while (0)
-#elif defined(__native_client_codegen__)
-#if defined(TARGET_X86)
-/* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. */
-/* This keeps us from having to call x86_codegen_pre with specific */
-/* knowledge of the size of the instruction that follows it, and */
-/* localizes the alignment requirement to this spot. */
-#define x86_prefix(inst,p) \
- do { \
- x86_codegen_pre(&(inst), kNaClAlignment - 1); \
- *(inst)++ =(unsigned char) (p); \
- } while (0)
-#elif defined(TARGET_AMD64)
-/* We need to tag any prefixes so we can perform proper membase sandboxing */
-/* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details */
-#define x86_prefix(inst,p) \
- do { \
- amd64_nacl_tag_legacy_prefix((inst)); \
- *(inst)++ =(unsigned char) (p); \
- } while (0)
-
-#endif /* TARGET_AMD64 */
-
-#endif /* __native_client_codegen__ */
#define x86_mfence(inst) \
do { \
} while (0)
#endif
-#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
-#define x86_jump_reg(inst,reg) do { \
- x86_codegen_pre(&(inst), 5); \
- *(inst)++ = (unsigned char)0x83; /* and */ \
- x86_reg_emit ((inst), 4, (reg)); /* reg */ \
- *(inst)++ = (unsigned char)nacl_align_byte; \
- *(inst)++ = (unsigned char)0xff; \
- x86_reg_emit ((inst), 4, (reg)); \
- } while (0)
-
-/* Let's hope ECX is available for these... */
-#define x86_jump_mem(inst,mem) do { \
- x86_mov_reg_mem(inst, (X86_ECX), (mem), 4); \
- x86_jump_reg(inst, (X86_ECX)); \
- } while (0)
-
-#define x86_jump_membase(inst,basereg,disp) do { \
- x86_mov_reg_membase(inst, (X86_ECX), basereg, disp, 4); \
- x86_jump_reg(inst, (X86_ECX)); \
- } while (0)
-
-/* like x86_jump_membase, but force a 32-bit displacement */
-#define x86_jump_membase32(inst,basereg,disp) do { \
- x86_codegen_pre(&(inst), 6); \
- *(inst)++ = (unsigned char)0x8b; \
- x86_address_byte ((inst), 2, X86_ECX, (basereg)); \
- x86_imm_emit32 ((inst), (disp)); \
- x86_jump_reg(inst, (X86_ECX)); \
- } while (0)
-#else /* __native_client_codegen__ */
#define x86_jump_reg(inst,reg) \
do { \
*(inst)++ = (unsigned char)0xff; \
*(inst)++ = (unsigned char)0xff; \
x86_membase_emit ((inst), 4, (basereg), (disp)); \
} while (0)
-#endif /* __native_client_codegen__ */
/*
* target is a pointer in our buffer.
*/
} \
} while (0)
-#if defined(__default_codegen__)
-#define x86_jump_code(inst,target) \
- do { \
- x86_jump_code_body((inst),(target)); \
- } while (0)
-#elif defined(__native_client_codegen__) && defined(TARGET_X86)
#define x86_jump_code(inst,target) \
do { \
- guint8* jump_start = (inst); \
- x86_jump_code_body((inst),(target)); \
- x86_patch(jump_start, (target)); \
- } while (0)
-#elif defined(__native_client_codegen__) && defined(TARGET_AMD64)
-#define x86_jump_code(inst,target) \
- do { \
- /* jump_code_body is used twice because there are offsets */ \
- /* calculated based on the IP, which can change after the */ \
- /* call to amd64_codegen_post */ \
- amd64_codegen_pre(inst); \
- x86_jump_code_body((inst),(target)); \
- inst = amd64_codegen_post(inst); \
x86_jump_code_body((inst),(target)); \
} while (0)
-#endif /* __native_client_codegen__ */
#define x86_jump_disp(inst,disp) \
do { \
} \
} while (0)
-#if defined(__default_codegen__)
#define x86_branch(inst,cond,target,is_signed) \
do { \
x86_branch_body((inst),(cond),(target),(is_signed)); \
} while (0)
-#elif defined(__native_client_codegen__)
-#define x86_branch(inst,cond,target,is_signed) \
- do { \
- /* branch_body is used twice because there are offsets */ \
- /* calculated based on the IP, which can change after */ \
- /* the call to amd64_codegen_post */ \
- amd64_codegen_pre(inst); \
- x86_branch_body((inst),(cond),(target),(is_signed)); \
- inst = amd64_codegen_post(inst); \
- x86_branch_body((inst),(cond),(target),(is_signed)); \
- } while (0)
-#endif /* __native_client_codegen__ */
#endif /* TARGET_AMD64 */
} while (0)
-#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
-#define x86_call_reg_internal(inst,reg) \
- do { \
- *(inst)++ = (unsigned char)0x83; /* and */ \
- x86_reg_emit ((inst), 4, (reg)); /* reg */ \
- *(inst)++ = (unsigned char)nacl_align_byte; \
- *(inst)++ = (unsigned char)0xff; /* call */ \
- x86_reg_emit ((inst), 2, (reg)); /* reg */ \
- } while (0)
-
-#define x86_call_reg(inst, reg) do { \
- x86_call_sequence_pre((inst)); \
- x86_call_reg_internal(inst, reg); \
- x86_call_sequence_post((inst)); \
- } while (0)
-
-
-/* It appears that x86_call_mem() is never used, so I'm leaving it out. */
-#define x86_call_membase(inst,basereg,disp) do { \
- x86_call_sequence_pre((inst)); \
- /* x86_mov_reg_membase() inlined so its fixed size */ \
- *(inst)++ = (unsigned char)0x8b; \
- x86_address_byte ((inst), 2, (X86_ECX), (basereg)); \
- x86_imm_emit32 ((inst), (disp)); \
- x86_call_reg_internal(inst, X86_ECX); \
- x86_call_sequence_post((inst)); \
- } while (0)
-#else /* __native_client_codegen__ */
#define x86_call_reg(inst,reg) \
do { \
*(inst)++ = (unsigned char)0xff; \
*(inst)++ = (unsigned char)0xff; \
x86_membase_emit ((inst), 2, (basereg), (disp)); \
} while (0)
-#endif /* __native_client_codegen__ */
-
-
-#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
-
-#define x86_call_code(inst,target) \
- do { \
- int _x86_offset; \
- guint8* call_start; \
- guint8* _aligned_start; \
- x86_call_sequence_pre_val((inst)); \
- _x86_offset = (unsigned char*)(target) - (inst); \
- _x86_offset -= 5; \
- x86_call_imm_body ((inst), _x86_offset); \
- _aligned_start = x86_call_sequence_post_val((inst)); \
- call_start = _aligned_start; \
- _x86_offset = (unsigned char*)(target) - (_aligned_start); \
- _x86_offset -= 5; \
- x86_call_imm_body ((_aligned_start), _x86_offset); \
- x86_patch(call_start, (target)); \
- } while (0)
-
-#define SIZE_OF_RET 6
-#define x86_ret(inst) do { \
- *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
- x86_codegen_pre(&(inst), 5); \
- *(inst)++ = (unsigned char)0x83; /* and 0xffffffff, ecx */ \
- *(inst)++ = (unsigned char)0xe1; \
- *(inst)++ = (unsigned char)nacl_align_byte; \
- *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
- *(inst)++ = (unsigned char)0xe1; \
- } while (0)
-
-/* pop return address */
-/* pop imm bytes from stack */
-/* return */
-#define x86_ret_imm(inst,imm) do { \
- *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
- x86_alu_reg_imm ((inst), X86_ADD, X86_ESP, imm); \
- x86_codegen_pre(&(inst), 5); \
- *(inst)++ = (unsigned char)0x83; /* and 0xffffffff, ecx */ \
- *(inst)++ = (unsigned char)0xe1; \
- *(inst)++ = (unsigned char)nacl_align_byte; \
- *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
- *(inst)++ = (unsigned char)0xe1; \
-} while (0)
-#else /* __native_client_codegen__ */
#define x86_call_code(inst,target) \
do { \
x86_imm_emit16 ((inst), (imm)); \
} \
} while (0)
-#endif /* __native_client_codegen__ */
#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \
do { \
} \
} while (0)
-#ifdef __native_client_codegen__
-
-#define kx86NaClLengthOfCallReg 5
-#define kx86NaClLengthOfCallImm 5
-#define kx86NaClLengthOfCallMembase (kx86NaClLengthOfCallReg + 6)
-
-#endif /* __native_client_codegen__ */
-
#define x86_prolog(inst,frame_size,reg_mask) \
do { \
unsigned i, m = 1; \
void
mono_domain_code_commit (MonoDomain *domain, void *data, int size, int newsize);
-void *
-nacl_domain_get_code_dest (MonoDomain *domain, void *data);
-
-void
-nacl_domain_code_validate (MonoDomain *domain, guint8 **buf_base, int buf_size, guint8 **code_end);
-
void
mono_domain_code_foreach (MonoDomain *domain, MonoCodeManagerFunc func, void *user_data);
mono_domain_unlock (domain);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
-/*
- * Given the temporary buffer (allocated by mono_domain_code_reserve) into which
- * we are generating code, return a pointer to the destination in the dynamic
- * code segment into which the code will be copied when mono_domain_code_commit
- * is called.
- * LOCKING: Acquires the domain lock.
- */
-void *
-nacl_domain_get_code_dest (MonoDomain *domain, void *data)
-{
- void *dest;
- mono_domain_lock (domain);
- dest = nacl_code_manager_get_code_dest (domain->code_mp, data);
- mono_domain_unlock (domain);
- return dest;
-}
-
-/*
- * Convenience function which calls mono_domain_code_commit to validate and copy
- * the code. The caller sets *buf_base and *buf_size to the start and size of
- * the buffer (allocated by mono_domain_code_reserve), and *code_end to the byte
- * after the last instruction byte. On return, *buf_base will point to the start
- * of the copied in the code segment, and *code_end will point after the end of
- * the copied code.
- */
-void
-nacl_domain_code_validate (MonoDomain *domain, guint8 **buf_base, int buf_size, guint8 **code_end)
-{
- guint8 *tmp = nacl_domain_get_code_dest (domain, *buf_base);
- mono_domain_code_commit (domain, *buf_base, buf_size, *code_end - *buf_base);
- *code_end = tmp + (*code_end - *buf_base);
- *buf_base = tmp;
-}
-
-#else
-
-/* no-op versions of Native Client functions */
-
-void *
-nacl_domain_get_code_dest (MonoDomain *domain, void *data)
-{
- return data;
-}
-
-void
-nacl_domain_code_validate (MonoDomain *domain, guint8 **buf_base, int buf_size, guint8 **code_end)
-{
-}
-
-#endif
-
/*
* mono_domain_code_foreach:
* Iterate over the code thunks of the code manager of @domain.
mono_img_writer_emit_byte (acfg->w, val);
}
-#ifdef __native_client_codegen__
-static inline void
-emit_nacl_call_alignment (MonoAotCompile *acfg)
-{
- mono_img_writer_emit_nacl_call_alignment (acfg->w);
-}
-#endif
-
#if defined(TARGET_WIN32) && defined(TARGET_X86)
static G_GNUC_UNUSED void
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
-#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && defined(__native_client_codegen__)
-#undef AOT_FUNC_ALIGNMENT
-#define AOT_FUNC_ALIGNMENT 32
-#endif
#if defined(TARGET_POWERPC64) && !defined(__mono_ilp32__)
#define PPC_LD_OP "ld"
arch_emit_plt_entry (MonoAotCompile *acfg, const char *got_symbol, int offset, int info_offset)
{
#if defined(TARGET_X86)
-#if defined(__default_codegen__)
/* jmp *<offset>(%ebx) */
emit_byte (acfg, 0xff);
emit_byte (acfg, 0xa3);
emit_int32 (acfg, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, info_offset);
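/* For reference: 0xff 0xa3 followed by the 32-bit offset is the 6-byte encoding
 * of "jmp *<offset>(%ebx)", and the trailing emit_int32 appends 4 bytes of data,
 * so this branch emits 10 bytes per PLT entry. */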
-#elif defined(__native_client_codegen__)
- const guint8 kSizeOfNaClJmp = 11;
- guint8 bytes[kSizeOfNaClJmp];
- guint8 *pbytes = &bytes[0];
-
- x86_jump_membase32 (pbytes, X86_EBX, offset);
- emit_bytes (acfg, bytes, kSizeOfNaClJmp);
- /* four bytes of data, used by mono_arch_patch_plt_entry */
- /* For Native Client, make this work with data embedded in push. */
- emit_byte (acfg, 0x68); /* hide data in a push */
- emit_int32 (acfg, info_offset);
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif /*__native_client_codegen__*/
#elif defined(TARGET_AMD64)
-#if defined(__default_codegen__)
emit_unset_mode (acfg);
fprintf (acfg->fp, "jmp *%s+%d(%%rip)\n", got_symbol, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, info_offset);
acfg->stats.plt_size += 10;
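/* For reference: "jmp *<offset>(%rip)" assembles to 0xff 0x25 followed by a
 * 32-bit RIP-relative displacement (6 bytes); together with the 4-byte
 * info_offset it accounts for the 10 bytes added to plt_size. */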
-#elif defined(__native_client_codegen__)
- guint8 buf [256];
- guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
- guint8 *code = buf_aligned;
-
- /* mov <OFFSET>(%rip), %r11d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
- emit_symbol_diff (acfg, got_symbol, ".", offset - 4);
-
- amd64_jump_reg (code, AMD64_R11);
- /* This should be constant for the plt patch */
- g_assert ((size_t)(code-buf_aligned) == 10);
- emit_bytes (acfg, buf_aligned, code - buf_aligned);
-
- /* Hide data in a push imm32 so it passes validation */
- emit_byte (acfg, 0x68); /* push */
- emit_int32 (acfg, info_offset);
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [256];
guint8 *code;
* - all the trampolines should be of the same length.
*/
#if defined(TARGET_AMD64)
-#if defined(__default_codegen__)
/* This should be exactly 8 bytes long */
*tramp_size = 8;
/* call *<offset>(%rip) */
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
emit_zero_bytes (acfg, 1);
}
-#elif defined(__native_client_codegen__)
- guint8 buf [256];
- guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
- guint8 *code = buf_aligned;
- guint8 *call_start;
- size_t call_len;
- int got_offset;
-
- /* Emit this call in 'code' so we can find out how long it is. */
- amd64_call_reg (code, AMD64_R11);
- call_start = mono_arch_nacl_skip_nops (buf_aligned);
- call_len = code - call_start;
-
- /* The tramp_size is twice the NaCl alignment because it starts with */
- /* a call which needs to be aligned to the end of the boundary. */
- *tramp_size = kNaClAlignment*2;
- {
- /* Emit nops to align call site below which is 7 bytes plus */
- /* the length of the call sequence emitted above. */
- /* Note: this requires the specific trampoline starts on a */
- /* kNaclAlignedment aligned address, which it does because */
- /* it's its own function that is aligned. */
- guint8 nop_buf[256];
- guint8 *nopbuf_aligned = ALIGN_TO (nop_buf, kNaClAlignment);
- guint8 *nopbuf_end = mono_arch_nacl_pad (nopbuf_aligned, kNaClAlignment - 7 - (call_len));
- emit_bytes (acfg, nopbuf_aligned, nopbuf_end - nopbuf_aligned);
- }
- /* The trampoline is stored at the offset'th pointer, the -4 is */
- /* present because RIP relative addressing starts at the end of */
- /* the current instruction, while the label "." is relative to */
- /* the beginning of the current asm location, which in this case */
- /* is not the mov instruction, but the offset itself, due to the */
- /* way the bytes and ints are emitted here. */
- got_offset = (offset * sizeof(gpointer)) - 4;
-
- /* mov <OFFSET>(%rip), %r11d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
- emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
-
- /* naclcall %r11 */
- emit_bytes (acfg, call_start, call_len);
-
- /* The arg is stored at the offset+1 pointer, relative to beginning */
- /* of trampoline: 7 for mov, plus the call length, and 1 for push. */
- got_offset = ((offset + 1) * sizeof(gpointer)) + 7 + call_len + 1;
-
- /* We can't emit this data directly, hide in a "push imm32" */
- emit_byte (acfg, '\x68'); /* push */
- emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
- emit_alignment (acfg, kNaClAlignment);
-#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
/* Branch to generic trampoline */
x86_jump_reg (code, X86_ECX);
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
emit_bytes (acfg, buf, code - buf);
- *tramp_size = NACL_SIZE(17, kNaClAlignment);
+ *tramp_size = 17;
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
arch_emit_static_rgctx_trampoline (MonoAotCompile *acfg, int offset, int *tramp_size)
{
#if defined(TARGET_AMD64)
-#if defined(__default_codegen__)
/* This should be exactly 13 bytes long */
*tramp_size = 13;
emit_byte (acfg, '\x25');
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
}
-#elif defined(__native_client_codegen__)
- guint8 buf [128];
- guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
- guint8 *code = buf_aligned;
-
- /* mov <OFFSET>(%rip), %r10d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x15');
- emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
-
- /* mov <OFFSET>(%rip), %r11d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
- emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
-
- /* nacljmp *%r11 */
- amd64_jump_reg (code, AMD64_R11);
- emit_bytes (acfg, buf_aligned, code - buf_aligned);
-
- emit_alignment (acfg, kNaClAlignment);
- *tramp_size = kNaClAlignment;
-#endif /*__native_client_codegen__*/
-
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
-
emit_bytes (acfg, buf, code - buf);
- *tramp_size = NACL_SIZE (15, kNaClAlignment);
+ *tramp_size = 15;
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
{
#if defined(TARGET_AMD64)
guint8 *buf, *code;
-#if defined(__native_client_codegen__)
- guint8 *buf_alloc;
-#endif
guint8 *labels [16];
guint8 mov_buf[3];
guint8 *mov_buf_ptr = mov_buf;
const int kSizeOfMove = 7;
-#if defined(__default_codegen__)
+
code = buf = (guint8 *)g_malloc (256);
-#elif defined(__native_client_codegen__)
- buf_alloc = g_malloc (256 + kNaClAlignment + kSizeOfMove);
- buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
- /* The RIP relative move below is emitted first */
- buf += kSizeOfMove;
- code = buf;
-#endif
/* FIXME: Optimize this, i.e. use binary search etc. */
/* Maybe move the body into a separate function (slower, but much smaller) */
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf + kSizeOfMove;
-#if defined(__native_client_codegen__)
- /* The tramp will be padded to the next kNaClAlignment bundle. */
- *tramp_size = ALIGN_TO ((*tramp_size), kNaClAlignment);
-#endif
-#if defined(__default_codegen__)
g_free (buf);
-#elif defined(__native_client_codegen__)
- g_free (buf_alloc);
-#endif
#elif defined(TARGET_X86)
guint8 *buf, *code;
-#ifdef __native_client_codegen__
- guint8 *buf_alloc;
-#endif
guint8 *labels [16];
-#if defined(__default_codegen__)
code = buf = g_malloc (256);
-#elif defined(__native_client_codegen__)
- buf_alloc = g_malloc (256 + kNaClAlignment);
- code = buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
-#endif
/* Allocate a temporary stack slot */
x86_push_reg (code, X86_EAX);
mono_x86_patch (labels [3], code);
x86_breakpoint (code);
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf;
-#if defined(__default_codegen__)
g_free (buf);
-#elif defined(__native_client_codegen__)
- g_free (buf_alloc);
-#endif
#elif defined(TARGET_ARM)
guint8 buf [128];
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
-
emit_bytes (acfg, buf, code - buf);
- *tramp_size = NACL_SIZE (15, kNaClAlignment);
+ *tramp_size = 15;
g_assert (code - buf == *tramp_size);
#elif defined(TARGET_ARM)
guint8 buf [128];
emit_line (acfg);
emit_section_change (acfg, ".text", 0);
- emit_alignment_code (acfg, NACL_SIZE(16, kNaClAlignment));
+ emit_alignment_code (acfg, 16);
emit_info_symbol (acfg, "plt");
emit_label (acfg, acfg->plt_symbol);
ji = info->ji;
unwind_ops = info->unwind_ops;
-#ifdef __native_client_codegen__
- mono_nacl_fix_patches (code, ji);
-#endif
-
/* Emit code */
sprintf (start_symbol, "%s%s", acfg->user_symbol_prefix, name);
default:
g_assert_not_reached ();
}
-#ifdef __native_client_codegen__
- /* align to avoid 32-byte boundary crossings */
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif
-
if (!acfg->trampoline_size [ntype]) {
g_assert (tramp_size);
acfg->trampoline_size [ntype] = tramp_size;
* Emit some padding so the local symbol for the first method doesn't have the
* same address as 'methods'.
*/
-#if defined(__default_codegen__)
emit_padding (acfg, 16);
-#elif defined(__native_client_codegen__)
- {
- const int kPaddingSize = 16;
- guint8 pad_buffer[kPaddingSize];
- mono_arch_nacl_pad (pad_buffer, kPaddingSize);
- emit_bytes (acfg, pad_buffer, kPaddingSize);
- }
-#endif
for (oindex = 0; oindex < acfg->method_order->len; ++oindex) {
MonoCompile *cfg;
sprintf (symbol, "ut_%d", get_method_index (acfg, method));
emit_section_change (acfg, ".text", 0);
-#ifdef __native_client_codegen__
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif
if (acfg->thumb_mixed && cfg->compile_llvm) {
emit_set_thumb_mode (acfg);
#
# See the code in mini-x86.c for more details on how the specifiers are used.
#
-#
-# Native Client Note: NaCl call sequences do not really reach > 32 bytes but
-# the maximum length can be high, so if we get unlucky and wind up trying to
-# emit a call sequence such that we are one or two bytes too long, we need to
-# pad out almost an entire 32 bytes.
-#
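#
# As a quick reminder of the format used below (mini-x86.c has the authoritative
# description): "len:" gives the maximum number of bytes the opcode can expand
# to, "dest:"/"src1:"/"src2:" name the register classes of the operands, and
# "clob:" lists what the opcode clobbers (for example "c" for call-clobbered
# registers, "1" for the first source register). The "nacl:" entries being
# dropped were alternative instruction-length limits used only when generating
# Native Client code.
#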
break: len:2
tailcall: len:120 clob:c
seq_point: len:46 clob:c
il_seq_point: len:0
-long_add: dest:i src1:i src2:i len:3 clob:1 nacl:6
-long_sub: dest:i src1:i src2:i len:3 clob:1 nacl:6
+long_add: dest:i src1:i src2:i len:3 clob:1
+long_sub: dest:i src1:i src2:i len:3 clob:1
long_mul: dest:i src1:i src2:i len:4 clob:1
long_div: dest:a src1:a src2:i len:16 clob:d
long_div_un: dest:a src1:a src2:i len:16 clob:d
long_max: dest:i src1:i src2:i len:16 clob:1
long_max_un: dest:i src1:i src2:i len:16 clob:1
-throw: src1:i len:18 nacl:50
-rethrow: src1:i len:18 nacl:50
+throw: src1:i len:18
+rethrow: src1:i len:18
start_handler: len:16
-endfinally: len:9 nacl:22
-endfilter: src1:a len:9 nacl:19
+endfinally: len:9
+endfilter: src1:a len:9
get_ex_obj: dest:a len:16
ckfinite: dest:f src1:f len:43
fcompare: src1:f src2:f clob:a len:13
rcompare: src1:f src2:f clob:a len:13
oparglist: src1:b len:11
-checkthis: src1:b len:5 nacl:8
-call: dest:a clob:c len:32 nacl:64
-voidcall: clob:c len:32 nacl:64
-voidcall_reg: src1:i clob:c len:32 nacl:64
-voidcall_membase: src1:b clob:c len:32 nacl:64
+checkthis: src1:b len:5
+call: dest:a clob:c len:32
+voidcall: clob:c len:32
+voidcall_reg: src1:i clob:c len:32
+voidcall_membase: src1:b clob:c len:32
fcall: dest:f len:64 clob:c
fcall_reg: dest:f src1:i len:64 clob:c
fcall_membase: dest:f src1:b len:64 clob:c
vcall: len:64 clob:c
vcall_reg: src1:i len:64 clob:c
vcall_membase: src1:b len:64 clob:c
-call_reg: dest:a src1:i len:32 clob:c nacl:64
-call_membase: dest:a src1:b len:32 clob:c nacl:64
+call_reg: dest:a src1:i len:32 clob:c
+call_membase: dest:a src1:b len:32 clob:c
iconst: dest:i len:10
i8const: dest:i len:10
r4const: dest:f len:14
r8const: dest:f len:9
store_membase_imm: dest:b len:15
-store_membase_reg: dest:b src1:i len:9 nacl:11
-storei8_membase_reg: dest:b src1:i len:9 nacl:11
-storei1_membase_imm: dest:b len:11 nacl:15
-storei1_membase_reg: dest:b src1:c len:9 nacl:11
-storei2_membase_imm: dest:b len:13 nacl:15
-storei2_membase_reg: dest:b src1:i len:9 nacl:11
-storei4_membase_imm: dest:b len:13 nacl:15
-storei4_membase_reg: dest:b src1:i len:9 nacl:11
+store_membase_reg: dest:b src1:i len:9
+storei8_membase_reg: dest:b src1:i len:9
+storei1_membase_imm: dest:b len:11
+storei1_membase_reg: dest:b src1:c len:9
+storei2_membase_imm: dest:b len:13
+storei2_membase_reg: dest:b src1:i len:9
+storei4_membase_imm: dest:b len:13
+storei4_membase_reg: dest:b src1:i len:9
storei8_membase_imm: dest:b len:18
storer4_membase_reg: dest:b src1:f len:15
storer8_membase_reg: dest:b src1:f len:10
-load_membase: dest:i src1:b len:8 nacl:12
-loadi1_membase: dest:c src1:b len:9 nacl:12
-loadu1_membase: dest:c src1:b len:9 nacl:12
-loadi2_membase: dest:i src1:b len:9 nacl:12
-loadu2_membase: dest:i src1:b len:9 nacl:12
-loadi4_membase: dest:i src1:b len:9 nacl:12
-loadu4_membase: dest:i src1:b len:9 nacl:12
-loadi8_membase: dest:i src1:b len:18 nacl:14
+load_membase: dest:i src1:b len:8
+loadi1_membase: dest:c src1:b len:9
+loadu1_membase: dest:c src1:b len:9
+loadi2_membase: dest:i src1:b len:9
+loadu2_membase: dest:i src1:b len:9
+loadi4_membase: dest:i src1:b len:9
+loadu4_membase: dest:i src1:b len:9
+loadi8_membase: dest:i src1:b len:18
loadr4_membase: dest:f src1:b len:16
loadr8_membase: dest:f src1:b len:16
loadu4_mem: dest:i len:10
amd64_loadi8_memindex: dest:i src1:i src2:i len:10
move: dest:i src1:i len:3
-add_imm: dest:i src1:i len:8 clob:1 nacl:11
-sub_imm: dest:i src1:i len:8 clob:1 nacl:11
+add_imm: dest:i src1:i len:8 clob:1
+sub_imm: dest:i src1:i len:8 clob:1
mul_imm: dest:i src1:i len:12
and_imm: dest:i src1:i len:8 clob:1
or_imm: dest:i src1:i len:8 clob:1
move_i4_to_f: dest:f src1:i len:16
move_f_to_i8: dest:i src1:f len:5
move_i8_to_f: dest:f src1:i len:5
-call_handler: len:14 clob:c nacl:52
+call_handler: len:14 clob:c
aot_const: dest:i len:10
gc_safe_point: clob:c src1:i len:40
x86_test_null: src1:i len:5
x86_push_membase: src1:b len:8
x86_push_obj: src1:b len:40
x86_lea: dest:i src1:i src2:i len:8
-x86_lea_membase: dest:i src1:i len:11 nacl:14
+x86_lea_membase: dest:i src1:i len:11
x86_xchg: src1:i src2:i clob:x len:2
x86_fpop: src1:f len:3
x86_seteq_membase: src1:b len:9
adc_imm: dest:i src1:i len:8 clob:1
sbb: dest:i src1:i src2:i len:3 clob:1
sbb_imm: dest:i src1:i len:8 clob:1
-br_reg: src1:i len:3 nacl:8
+br_reg: src1:i len:3
sin: dest:f src1:f len:32
cos: dest:f src1:f len:32
abs: dest:f src1:f clob:1 len:32
lsubcc: dest:i src1:i src2:i len:3 clob:1
# 32 bit opcodes
-int_add: dest:i src1:i src2:i clob:1 len:4 nacl:7
-int_sub: dest:i src1:i src2:i clob:1 len:4 nacl:7
+int_add: dest:i src1:i src2:i clob:1 len:4
+int_sub: dest:i src1:i src2:i clob:1 len:4
int_mul: dest:i src1:i src2:i clob:1 len:4
int_mul_ovf: dest:i src1:i src2:i clob:1 len:32
int_mul_ovf_un: dest:i src1:i src2:i clob:1 len:32
int_sbb_imm: dest:i src1:i clob:1 len:8
int_addcc: dest:i src1:i src2:i clob:1 len:16
int_subcc: dest:i src1:i src2:i clob:1 len:16
-int_add_imm: dest:i src1:i clob:1 len:8 nacl:10
-int_sub_imm: dest:i src1:i clob:1 len:8 nacl:10
+int_add_imm: dest:i src1:i clob:1 len:8
+int_sub_imm: dest:i src1:i clob:1 len:8
int_mul_imm: dest:i src1:i clob:1 len:32
int_div_imm: dest:a src1:i clob:d len:32
int_div_un_imm: dest:a src1:i clob:d len:32
cmov_lle_un: dest:i src1:i src2:i len:16 clob:1
cmov_llt_un: dest:i src1:i src2:i len:16 clob:1
-long_add_imm: dest:i src1:i clob:1 len:12 nacl:15
-long_sub_imm: dest:i src1:i clob:1 len:12 nacl:15
+long_add_imm: dest:i src1:i clob:1 len:12
+long_sub_imm: dest:i src1:i clob:1 len:12
long_and_imm: dest:i src1:i clob:1 len:12
long_or_imm: dest:i src1:i clob:1 len:12
long_xor_imm: dest:i src1:i clob:1 len:12
vcall2_reg: src1:i len:64 clob:c
vcall2_membase: src1:b len:64 clob:c
-dyn_call: src1:i src2:i len:128 clob:c nacl:128
+dyn_call: src1:i src2:i len:128 clob:c
localloc_imm: dest:i len:96
throw: src1:i len:13
rethrow: src1:i len:13
start_handler: len:16
-endfinally: len:16 nacl:21
-endfilter: src1:a len:16 nacl:21
+endfinally: len:16
+endfilter: src1:a len:16
get_ex_obj: dest:a len:16
ckfinite: dest:f src1:f len:32
checkthis: src1:b len:3
voidcall: len:17 clob:c
voidcall_reg: src1:i len:11 clob:c
-voidcall_membase: src1:b len:16 nacl:17 clob:c
+voidcall_membase: src1:b len:16 clob:c
fcall: dest:f len:17 clob:c
fcall_reg: dest:f src1:i len:11 clob:c
-fcall_membase: dest:f src1:b len:16 nacl:17 clob:c
+fcall_membase: dest:f src1:b len:16 clob:c
lcall: dest:l len:17 clob:c
lcall_reg: dest:l src1:i len:11 clob:c
-lcall_membase: dest:l src1:b len:16 nacl:17 clob:c
+lcall_membase: dest:l src1:b len:16 clob:c
vcall: len:17 clob:c
vcall_reg: src1:i len:11 clob:c
-vcall_membase: src1:b len:16 nacl:17 clob:c
-call_reg: dest:a src1:i len:11 nacl:14 clob:c
-call_membase: dest:a src1:b len:16 nacl:18 clob:c
+vcall_membase: src1:b len:16 clob:c
+call_reg: dest:a src1:i len:11 clob:c
+call_membase: dest:a src1:b len:16 clob:c
iconst: dest:i len:5
r4const: dest:f len:15
r8const: dest:f len:16
adc_imm: dest:i src1:i len:6 clob:1
sbb: dest:i src1:i src2:i len:2 clob:1
sbb_imm: dest:i src1:i len:6 clob:1
-br_reg: src1:i len:2 nacl:5
+br_reg: src1:i len:2
sin: dest:f src1:f len:6
cos: dest:f src1:f len:6
abs: dest:f src1:f len:2
vcall2: len:17 clob:c
vcall2_reg: src1:i len:11 clob:c
-vcall2_membase: src1:b len:16 nacl:17 clob:c
+vcall2_membase: src1:b len:16 clob:c
localloc_imm: dest:i len:120
NULL
};
-#ifdef __native_client_codegen__
-extern gint8 nacl_align_byte;
-#endif
#ifdef __native_client__
extern char *nacl_mono_path;
#endif
" --trace[=EXPR] Enable tracing, use --help-trace for details\n"
" --jitmap Output a jit method map to /tmp/perf-PID.map\n"
" --help-devel Shows more options available to developers\n"
-#ifdef __native_client_codegen__
- " --nacl-align-mask-off Turn off Native Client 32-byte alignment mask (for debug only)\n"
-#endif
"\n"
"Runtime:\n"
" --config FILE Loads FILE as the Mono config\n"
#endif
} else if (strcmp (argv [i], "--nollvm") == 0){
mono_use_llvm = FALSE;
-#ifdef __native_client_codegen__
- } else if (strcmp (argv [i], "--nacl-align-mask-off") == 0){
- nacl_align_byte = -1; /* 0xff */
-#endif
#ifdef __native_client__
} else if (strcmp (argv [i], "--nacl-mono-path") == 0){
nacl_mono_path = g_strdup(argv[++i]);
}
#ifdef __native_client_codegen__
- if (g_getenv ("MONO_NACL_ALIGN_MASK_OFF"))
- {
- nacl_align_byte = -1; /* 0xff */
- }
if (!nacl_null_checks_off) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->explicit_null_checks = TRUE;
/* Restore all registers except %rip and %r11 */
gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
for (i = 0; i < AMD64_NREG; ++i) {
-#if defined(__native_client_codegen__)
- if (i == AMD64_R15)
- continue;
-#endif
if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
}
/* jump to the saved IP */
amd64_jump_reg (code, AMD64_R11);
- nacl_global_codeman_validate (&start, 256, &code);
-
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
guint32 pos;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
- const guint kMaxCodeSize = NACL_SIZE (128, 256);
+ const guint kMaxCodeSize = 128;
start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
/* load callee saved regs */
for (i = 0; i < AMD64_NREG; ++i) {
-#if defined(__native_client_codegen__)
- if (i == AMD64_R15)
- continue;
-#endif
if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
}
g_assert ((code - start) < kMaxCodeSize);
- nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
-
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
- const guint kMaxCodeSize = NACL_SIZE (256, 512);
+ const guint kMaxCodeSize = 256;
#ifdef TARGET_WIN32
dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
g_assert ((code - start) < kMaxCodeSize);
- nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
if (info)
static guint8* saved = NULL;
guint8 *code, *start;
int cont_reg = AMD64_R9; /* register usable on both call conventions */
- const guint kMaxCodeSize = NACL_SIZE (64, 128);
+ const guint kMaxCodeSize = 64;
if (saved)
amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
g_assert ((code - start) <= kMaxCodeSize);
- nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
{
#ifdef MONO_CROSS_COMPILE
g_assert_not_reached ();
-#elif defined(__native_client__)
- g_assert_not_reached ();
#else
arm_ucontext *my_uc = sigctx;
return (void*) UCONTEXT_REG_PC (my_uc);
/* jump to the saved IP */
x86_ret (code);
- nacl_global_codeman_validate(&start, 128, &code);
-
if (info)
*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
else {
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
- guint kMaxCodeSize = NACL_SIZE (64, 128);
+ guint kMaxCodeSize = 64;
/* call_filter (MonoContext *ctx, unsigned long eip) */
start = code = mono_global_codeman_reserve (kMaxCodeSize);
x86_leave (code);
x86_ret (code);
- nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
-
if (info)
*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
else {
int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
- guint kMaxCodeSize = NACL_SIZE (128, 256);
+ guint kMaxCodeSize = 128;
start = code = mono_global_codeman_reserve (kMaxCodeSize);
}
x86_breakpoint (code);
- nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
-
g_assert ((code - start) < kMaxCodeSize);
if (info)
gpointer
mono_arch_ip_from_context (void *sigctx)
{
-#if defined(__native_client__) || defined(HOST_WATCHOS)
+#if defined(HOST_WATCHOS)
printf("WARNING: mono_arch_ip_from_context() called!\n");
return (NULL);
#elif defined(MONO_ARCH_USE_SIGACTION)
static guint8* saved = NULL;
guint8 *code, *start;
-#ifdef __native_client_codegen__
- g_print("mono_tasklets_arch_restore needs to be aligned for Native Client\n");
-#endif
if (saved)
return (MonoContinuationRestore)saved;
code = start = mono_global_codeman_reserve (48);
}
}
-#ifdef __native_client_codegen__
-static void
-bin_writer_emit_nacl_call_alignment (MonoImageWriter *acfg) {
- int offset = acfg->cur_section->cur_offset;
- int padding = kNaClAlignment - (offset & kNaClAlignmentMask) - kNaClLengthOfCallImm;
- guint8 padc = '\x90';
-
- if (padding < 0) padding += kNaClAlignment;
-
- while (padding > 0) {
- bin_writer_emit_bytes(acfg, &padc, 1);
- padding -= 1;
- }
-}
-#endif /* __native_client_codegen__ */
-
static void
bin_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
}
#endif
-#ifdef __native_client_codegen__
-static void
-asm_writer_emit_nacl_call_alignment (MonoImageWriter *acfg) {
- int padding = kNaClAlignment - kNaClLengthOfCallImm;
- guint8 padc = '\x90';
-
- fprintf (acfg->fp, "\n\t.align %d", kNaClAlignment);
- while (padding > 0) {
- fprintf (acfg->fp, "\n\t.byte %d", padc);
- padding -= 1;
- }
-}
-#endif /* __native_client_codegen__ */
-
static void
asm_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
#endif
}
-#ifdef __native_client_codegen__
-void
-mono_img_writer_emit_nacl_call_alignment (MonoImageWriter *acfg) {
-#ifdef USE_BIN_WRITER
- if (acfg->use_bin_writer)
- bin_writer_emit_nacl_call_alignment (acfg);
- else
- asm_writer_emit_nacl_call_alignment (acfg);
-#else
- g_assert_not_reached();
-#endif
-}
-#endif /* __native_client_codegen__ */
-
void
mono_img_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
void mono_img_writer_emit_alignment_fill (MonoImageWriter *w, int size, int fill);
-#ifdef __native_client_codegen__
-void mono_img_writer_emit_nacl_call_alignment (MonoImageWriter *w);
-#endif
-
void mono_img_writer_emit_pointer_unaligned (MonoImageWriter *w, const char *target);
void mono_img_writer_emit_pointer (MonoImageWriter *w, const char *target);
return amd64_is_imm32 (val);
}
-#ifdef __native_client_codegen__
-
-/* Keep track of instruction "depth", that is, the level of sub-instruction */
-/* for any given instruction. For instance, amd64_call_reg resolves to */
-/* amd64_call_reg_internal, which uses amd64_alu_* macros, etc. */
-/* We only want to force bundle alignment for the top level instruction, */
-/* so NaCl pseudo-instructions can be implemented with sub instructions. */
-static MonoNativeTlsKey nacl_instruction_depth;
-
-static MonoNativeTlsKey nacl_rex_tag;
-static MonoNativeTlsKey nacl_legacy_prefix_tag;
-
-void
-amd64_nacl_clear_legacy_prefix_tag ()
-{
- mono_native_tls_set_value (nacl_legacy_prefix_tag, NULL);
-}
-
-void
-amd64_nacl_tag_legacy_prefix (guint8* code)
-{
- if (mono_native_tls_get_value (nacl_legacy_prefix_tag) == NULL)
- mono_native_tls_set_value (nacl_legacy_prefix_tag, code);
-}
-
-void
-amd64_nacl_tag_rex (guint8* code)
-{
- mono_native_tls_set_value (nacl_rex_tag, code);
-}
-
-guint8*
-amd64_nacl_get_legacy_prefix_tag ()
-{
- return (guint8*)mono_native_tls_get_value (nacl_legacy_prefix_tag);
-}
-
-guint8*
-amd64_nacl_get_rex_tag ()
-{
- return (guint8*)mono_native_tls_get_value (nacl_rex_tag);
-}
-
-/* Increment the instruction "depth" described above */
-void
-amd64_nacl_instruction_pre ()
-{
- intptr_t depth = (intptr_t) mono_native_tls_get_value (nacl_instruction_depth);
- depth++;
- mono_native_tls_set_value (nacl_instruction_depth, (gpointer)depth);
-}
-
-/* amd64_nacl_instruction_post: Decrement instruction "depth", force bundle */
-/* alignment if depth == 0 (top level instruction) */
-/* IN: start, end pointers to instruction beginning and end */
-/* OUT: start, end pointers to beginning and end after possible alignment */
-/* GLOBALS: nacl_instruction_depth defined above */
-void
-amd64_nacl_instruction_post (guint8 **start, guint8 **end)
-{
- intptr_t depth = (intptr_t) mono_native_tls_get_value (nacl_instruction_depth);
- depth--;
- mono_native_tls_set_value (nacl_instruction_depth, (void*)depth);
-
- g_assert ( depth >= 0 );
- if (depth == 0) {
- uintptr_t space_in_block;
- uintptr_t instlen;
- guint8 *prefix = amd64_nacl_get_legacy_prefix_tag ();
- /* if legacy prefix is present, and if it was emitted before */
- /* the start of the instruction sequence, adjust the start */
- if (prefix != NULL && prefix < *start) {
- g_assert (*start - prefix <= 3);/* only 3 are allowed */
- *start = prefix;
- }
- space_in_block = kNaClAlignment - ((uintptr_t)(*start) & kNaClAlignmentMask);
- instlen = (uintptr_t)(*end - *start);
- /* Only check for instructions which are less than */
- /* kNaClAlignment. The only instructions that should ever */
- /* be that long are call sequences, which are already */
- /* padded out to align the return to the next bundle. */
- if (instlen > space_in_block && instlen < kNaClAlignment) {
- const size_t MAX_NACL_INST_LENGTH = kNaClAlignment;
- guint8 copy_of_instruction[MAX_NACL_INST_LENGTH];
- const size_t length = (size_t)((*end)-(*start));
- g_assert (length < MAX_NACL_INST_LENGTH);
-
- memcpy (copy_of_instruction, *start, length);
- *start = mono_arch_nacl_pad (*start, space_in_block);
- memcpy (*start, copy_of_instruction, length);
- *end = *start + length;
- }
- amd64_nacl_clear_legacy_prefix_tag ();
- amd64_nacl_tag_rex (NULL);
- }
-}
-
-/* amd64_nacl_membase_handler: ensure all access to memory of the form */
-/* OFFSET(%rXX) is sandboxed. For allowable base registers %rip, %rbp, */
-/* %rsp, and %r15, emit the membase as usual. For all other registers, */
-/* make sure the upper 32-bits are cleared, and use that register in the */
-/* index field of a new address of this form: OFFSET(%r15,%eXX,1) */
-/* IN: code */
-/* pointer to current instruction stream (in the */
-/* middle of an instruction, after opcode is emitted) */
-/* basereg/offset/dreg */
-/* operands of normal membase address */
-/* OUT: code */
-/* pointer to the end of the membase/memindex emit */
-/* GLOBALS: nacl_rex_tag */
-/* position in instruction stream that rex prefix was emitted */
-/* nacl_legacy_prefix_tag */
-/* (possibly NULL) position in instruction of legacy x86 prefix */
-void
-amd64_nacl_membase_handler (guint8** code, gint8 basereg, gint32 offset, gint8 dreg)
-{
- gint8 true_basereg = basereg;
-
- /* Cache these values, they might change */
- /* as new instructions are emitted below. */
- guint8* rex_tag = amd64_nacl_get_rex_tag ();
- guint8* legacy_prefix_tag = amd64_nacl_get_legacy_prefix_tag ();
-
- /* 'basereg' is given masked to 0x7 at this point, so check */
- /* the rex prefix to see if this is an extended register. */
- if ((rex_tag != NULL) && IS_REX(*rex_tag) && (*rex_tag & AMD64_REX_B)) {
- true_basereg |= 0x8;
- }
-
-#define X86_LEA_OPCODE (0x8D)
-
- if (!amd64_is_valid_nacl_base (true_basereg) && (*(*code-1) != X86_LEA_OPCODE)) {
- guint8* old_instruction_start;
-
- /* This will hold the 'mov %eXX, %eXX' that clears the upper */
- /* 32-bits of the old base register (new index register) */
- guint8 buf[32];
- guint8* buf_ptr = buf;
- size_t insert_len;
-
- g_assert (rex_tag != NULL);
-
- if (IS_REX(*rex_tag)) {
- /* The old rex.B should be the new rex.X */
- if (*rex_tag & AMD64_REX_B) {
- *rex_tag |= AMD64_REX_X;
- }
- /* Since our new base is %r15 set rex.B */
- *rex_tag |= AMD64_REX_B;
- } else {
- /* Shift the instruction by one byte */
- /* so we can insert a rex prefix */
- memmove (rex_tag + 1, rex_tag, (size_t)(*code - rex_tag));
- *code += 1;
- /* New rex prefix only needs rex.B for %r15 base */
- *rex_tag = AMD64_REX(AMD64_REX_B);
- }
-
- if (legacy_prefix_tag) {
- old_instruction_start = legacy_prefix_tag;
- } else {
- old_instruction_start = rex_tag;
- }
-
- /* Clears the upper 32-bits of the previous base register */
- amd64_mov_reg_reg_size (buf_ptr, true_basereg, true_basereg, 4);
- insert_len = buf_ptr - buf;
-
- /* Move the old instruction forward to make */
- /* room for 'mov' stored in 'buf_ptr' */
- memmove (old_instruction_start + insert_len, old_instruction_start, (size_t)(*code - old_instruction_start));
- *code += insert_len;
- memcpy (old_instruction_start, buf, insert_len);
-
- /* Sandboxed replacement for the normal membase_emit */
- x86_memindex_emit (*code, dreg, AMD64_R15, offset, basereg, 0);
-
- } else {
- /* Normal default behavior, emit membase memory location */
- x86_membase_emit_body (*code, dreg, basereg, offset);
- }
-}
-
-
-static inline unsigned char*
-amd64_skip_nops (unsigned char* code)
-{
- guint8 in_nop;
- do {
- in_nop = 0;
- if ( code[0] == 0x90) {
- in_nop = 1;
- code += 1;
- }
- if ( code[0] == 0x66 && code[1] == 0x90) {
- in_nop = 1;
- code += 2;
- }
- if (code[0] == 0x0f && code[1] == 0x1f
- && code[2] == 0x00) {
- in_nop = 1;
- code += 3;
- }
- if (code[0] == 0x0f && code[1] == 0x1f
- && code[2] == 0x40 && code[3] == 0x00) {
- in_nop = 1;
- code += 4;
- }
- if (code[0] == 0x0f && code[1] == 0x1f
- && code[2] == 0x44 && code[3] == 0x00
- && code[4] == 0x00) {
- in_nop = 1;
- code += 5;
- }
- if (code[0] == 0x66 && code[1] == 0x0f
- && code[2] == 0x1f && code[3] == 0x44
- && code[4] == 0x00 && code[5] == 0x00) {
- in_nop = 1;
- code += 6;
- }
- if (code[0] == 0x0f && code[1] == 0x1f
- && code[2] == 0x80 && code[3] == 0x00
- && code[4] == 0x00 && code[5] == 0x00
- && code[6] == 0x00) {
- in_nop = 1;
- code += 7;
- }
- if (code[0] == 0x0f && code[1] == 0x1f
- && code[2] == 0x84 && code[3] == 0x00
- && code[4] == 0x00 && code[5] == 0x00
- && code[6] == 0x00 && code[7] == 0x00) {
- in_nop = 1;
- code += 8;
- }
- } while ( in_nop );
- return code;
-}
-
-guint8*
-mono_arch_nacl_skip_nops (guint8* code)
-{
- return amd64_skip_nops(code);
-}
-
-#endif /*__native_client_codegen__*/
-
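For reference, the nop-skipping helpers deleted above matched the canonical 1- to 8-byte x86 NOP encodings. A self-contained sketch of the same table, in plain C and independent of the Mono headers (x86_nop_length is an illustrative name, not part of the patch):

#include <stddef.h>
#include <string.h>

/* Return the length of the canonical x86 NOP starting at p, or 0 if p
   does not point at one of the 1- to 8-byte encodings listed above. */
static size_t
x86_nop_length (const unsigned char *p)
{
	static const struct { unsigned char len; unsigned char bytes[8]; } nops[] = {
		{ 1, { 0x90 } },
		{ 2, { 0x66, 0x90 } },
		{ 3, { 0x0f, 0x1f, 0x00 } },
		{ 4, { 0x0f, 0x1f, 0x40, 0x00 } },
		{ 5, { 0x0f, 0x1f, 0x44, 0x00, 0x00 } },
		{ 6, { 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 } },
		{ 7, { 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 } },
		{ 8, { 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 } },
	};
	size_t i;
	for (i = 0; i < sizeof (nops) / sizeof (nops [0]); ++i)
		if (memcmp (p, nops [i].bytes, nops [i].len) == 0)
			return nops [i].len;
	return 0;
}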
static void
amd64_patch (unsigned char* code, gpointer target)
{
guint8 rex = 0;
-#ifdef __native_client_codegen__
- code = amd64_skip_nops (code);
-#endif
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (nacl_is_code_address (code)) {
- /* For tail calls, code is patched after being installed */
- /* but not through the normal "patch callsite" method. */
- unsigned char buf[kNaClAlignment];
- unsigned char *aligned_code = (uintptr_t)code & ~kNaClAlignmentMask;
- int ret;
- memcpy (buf, aligned_code, kNaClAlignment);
- /* Patch a temp buffer of bundle size, */
- /* then install to actual location. */
- amd64_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), target);
- ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
- g_assert (ret == 0);
- return;
- }
- target = nacl_modify_patch_target (target);
-#endif
-
/* Skip REX */
if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
rex = code [0];
return class1;
}
-#ifdef __native_client_codegen__
-
-/* Default alignment for Native Client is 32-byte. */
-gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
-
-/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
-/* Check that alignment doesn't cross an alignment boundary. */
-guint8*
-mono_arch_nacl_pad(guint8 *code, int pad)
-{
- const int kMaxPadding = 8; /* see amd64-codegen.h:amd64_padding_size() */
-
- if (pad == 0) return code;
- /* assertion: alignment cannot cross a block boundary */
- g_assert (((uintptr_t)code & (~kNaClAlignmentMask)) ==
- (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
- while (pad >= kMaxPadding) {
- amd64_padding (code, kMaxPadding);
- pad -= kMaxPadding;
- }
- if (pad != 0) amd64_padding (code, pad);
- return code;
-}
-#endif
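The padding helper removed above asserted that filler bytes never cross a 32-byte bundle boundary. The check it enforced can be written in isolation as follows; a sketch assuming kNaClAlignment == 32, using only standard C:

#include <stdint.h>

enum { kBundleSize = 32, kBundleMask = kBundleSize - 1 };

/* True when writing `pad` filler bytes starting at `code` stays inside
   the 32-byte bundle that `code` falls in. */
static int
pad_stays_in_bundle (uintptr_t code, int pad)
{
	if (pad == 0)
		return 1;
	return (code & ~(uintptr_t) kBundleMask) ==
	       ((code + (uintptr_t) pad - 1) & ~(uintptr_t) kBundleMask);
}
/* e.g. pad_stays_in_bundle (0x1000 + 30, 2) is true; with pad == 3 it is false. */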
static int
count_fields_nested (MonoClass *klass)
mono_arch_init (void)
{
mono_os_mutex_init_recursive (&mini_arch_mutex);
-#if defined(__native_client_codegen__)
- mono_native_tls_alloc (&nacl_instruction_depth, NULL);
- mono_native_tls_set_value (nacl_instruction_depth, (gpointer)0);
- mono_native_tls_alloc (&nacl_rex_tag, NULL);
- mono_native_tls_alloc (&nacl_legacy_prefix_tag, NULL);
-#endif
mono_aot_register_jit_icall ("mono_amd64_throw_exception", mono_amd64_throw_exception);
mono_aot_register_jit_icall ("mono_amd64_throw_corlib_exception", mono_amd64_throw_corlib_exception);
mono_arch_cleanup (void)
{
mono_os_mutex_destroy (&mini_arch_mutex);
-#if defined(__native_client_codegen__)
- mono_native_tls_free (nacl_instruction_depth);
- mono_native_tls_free (nacl_rex_tag);
- mono_native_tls_free (nacl_legacy_prefix_tag);
-#endif
}
/*
cfg->arch.omit_fp = TRUE;
cfg->arch.omit_fp_computed = TRUE;
-#ifdef __native_client_codegen__
- /* NaCl modules may not change the value of RBP, so it cannot be */
- /* used as a normal register, but it can be used as a frame pointer*/
- cfg->disable_omit_fp = TRUE;
- cfg->arch.omit_fp = FALSE;
-#endif
-
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
-#ifndef __native_client_codegen__
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
-#endif
#ifdef TARGET_WIN32
regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
-#ifndef __native_client_codegen__
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
-#endif
regs = g_list_prepend (regs, (gpointer)AMD64_R10);
regs = g_list_prepend (regs, (gpointer)AMD64_R9);
g_free (ainfo);
}
-#if !defined(__native_client__)
#define PTR_TO_GREG(ptr) (mgreg_t)(ptr)
#define GREG_TO_PTR(greg) (gpointer)(greg)
-#else
-/* Correctly handle casts to/from 32-bit pointers without compiler warnings */
-#define PTR_TO_GREG(ptr) (mgreg_t)(uintptr_t)(ptr)
-#define GREG_TO_PTR(greg) (gpointer)(guint32)(greg)
-#endif
/*
* mono_arch_get_start_dyn_call:
#ifdef MONO_ARCH_NOMAP32BIT
near_call = FALSE;
-#endif
-#if defined(__native_client__)
- /* Always use near_call == TRUE for Native Client */
- near_call = TRUE;
#endif
/* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
if (optimize_for_xen)
case OP_LOAD_MEMBASE:
#endif
case OP_LOADI8_MEMBASE:
-#ifndef __native_client_codegen__
- /* Don't generate memindex opcodes (to simplify */
- /* read sandboxing) */
+ /* Offsets that do not fit in an imm32 are materialized in a register */
+ /* and the load is rewritten as a memindex op. */
if (!amd64_use_imm32 (ins->inst_offset)) {
ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
ins->inst_indexreg = temp->dreg;
}
-#endif
break;
#ifndef __mono_ilp32__
case OP_STORE_MEMBASE_IMM:
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
amd64_cld (code);
-#if defined(__default_codegen__)
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
-#elif defined(__native_client_codegen__)
- /* NaCl stos pseudo-instruction */
- amd64_codegen_pre(code);
- /* First, clear the upper 32 bits of RDI (mov %edi, %edi) */
- amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
- /* Add %r15 to %rdi using lea, condition flags unaffected. */
- amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
- amd64_prefix (code, X86_REP_PREFIX);
- amd64_stosl (code);
- amd64_codegen_post(code);
-#endif /* __native_client_codegen__ */
if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
amd64_pop_reg (code, AMD64_RDI);
}
}
-#if defined(__native_client_codegen__)
- /* For Native Client, all indirect call/jump targets must be */
- /* 32-byte aligned. Exception handler blocks are jumped to */
- /* indirectly as well. */
- gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
- (bb->flags & BB_EXCEPTION_HANDLER);
-
- if ( bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
- int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
- if (pad != kNaClAlignment) code = mono_arch_nacl_pad(code, pad);
- cfg->code_len += pad;
- bb->native_offset = cfg->code_len;
- }
-#endif /*__native_client_codegen__*/
-
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
-#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+#define EXTRA_CODE_SPACE (16)
if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
cfg->code_size *= 2;
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
break;
case OP_STORE_MEMBASE_IMM:
-#ifndef __native_client_codegen__
- /* In NaCl, this could be a PCONST type, which could */
- /* mean a pointer type was copied directly into the */
- /* lower 32-bits of inst_imm, so for InvalidPtr==-1 */
- /* the value would be 0x00000000FFFFFFFF which is */
- /* not proper for an imm32 unless you cast it. */
g_assert (amd64_is_imm32 (ins->inst_imm));
-#endif
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer));
break;
case OP_STOREI8_MEMBASE_IMM:
}
case OP_LDIV:
case OP_LREM:
-#if defined( __native_client_codegen__ )
- amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
/* Regalloc magic makes the div/rem cases the same */
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
break;
case OP_LDIV_UN:
case OP_LREM_UN:
-#if defined( __native_client_codegen__ )
- amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
break;
case OP_IDIV:
case OP_IREM:
-#if defined( __native_client_codegen__ )
- amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq_size (code, 4);
break;
case OP_IDIV_UN:
case OP_IREM_UN:
-#if defined( __native_client_codegen__ )
- amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg2, 0, 4);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
break;
}
case OP_GC_SAFE_POINT: {
- const char *polling_func = NULL;
- int compare_val = 0;
guint8 *br [1];
-#if defined(__native_client_codegen__) && defined(__native_client_gc__)
- polling_func = "mono_nacl_gc";
- compare_val = 0xFFFFFFFF;
-#else
g_assert (mono_threads_is_coop_enabled ());
- polling_func = "mono_threads_state_poll";
- compare_val = 1;
-#endif
- amd64_test_membase_imm_size (code, ins->sreg1, 0, compare_val, 4);
+ amd64_test_membase_imm_size (code, ins->sreg1, 0, 1, 4);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func, FALSE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_threads_state_poll", FALSE);
amd64_patch (br[0], code);
break;
}
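With the NaCl GC branch gone, the safe point unconditionally tests the per-thread state word and calls mono_threads_state_poll when it is set. A rough C model of what the emitted sequence does (the state word is assumed here to be a 32-bit flag; only the shape of the check is illustrated):

#include <stdint.h>

extern void mono_threads_state_poll (void);

/* Models: test dword [sreg1], 1 ; je skip ; call mono_threads_state_poll ; skip: */
static inline void
gc_safe_point_model (const volatile int32_t *state)
{
	if (*state & 1)
		mono_threads_state_poll ();
}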
}
if ((code - cfg->native_code - offset) > max_len) {
-#if !defined(__native_client_codegen__)
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
-#endif
}
}
MonoInst *lmf_var = cfg->lmf_var;
gboolean args_clobbered = FALSE;
gboolean trace = FALSE;
-#ifdef __native_client_codegen__
- guint alignment_check;
-#endif
cfg->code_size = MAX (cfg->header->code_size * 4, 1024);
-#if defined(__default_codegen__)
code = cfg->native_code = (unsigned char *)g_malloc (cfg->code_size);
-#elif defined(__native_client_codegen__)
- /* native_code_alloc is not 32-byte aligned, native_code is. */
- cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
-
- /* Align native_code to next nearest kNaclAlignment byte. */
- cfg->native_code = (uintptr_t)cfg->native_code_alloc + kNaClAlignment;
- cfg->native_code = (uintptr_t)cfg->native_code & ~kNaClAlignmentMask;
-
- code = cfg->native_code;
-
- alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
- g_assert (alignment_check == 0);
-#endif
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
trace = TRUE;
amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
amd64_cld (code);
-#if defined(__default_codegen__)
- amd64_prefix (code, X86_REP_PREFIX);
- amd64_stosl (code);
-#elif defined(__native_client_codegen__)
- /* NaCl stos pseudo-instruction */
- amd64_codegen_pre (code);
- /* First, clear the upper 32 bits of RDI (mov %edi, %edi) */
- amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
- /* Add %r15 to %rdi using lea, condition flags unaffected. */
- amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
- amd64_codegen_post (code);
-#endif /* __native_client_codegen__ */
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_length += LOOP_ALIGNMENT;
-#ifdef __native_client_codegen__
- /* max alignment for native client */
- max_length += kNaClAlignment;
-#endif
MONO_BB_FOR_EACH_INS (bb, ins) {
-#ifdef __native_client_codegen__
- {
- int space_in_block = kNaClAlignment -
- ((max_length + cfg->code_len) & kNaClAlignmentMask);
- int max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
- if (space_in_block < max_len && max_len < kNaClAlignment) {
- max_length += space_in_block;
- }
- }
-#endif /*__native_client_codegen__*/
max_length += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
code_size += 8 + 7; /*sizeof (void*) + alignment */
}
-#ifdef __native_client_codegen__
- /* Give us extra room on Native Client. This could be */
- /* more carefully calculated, but bundle alignment makes */
- /* it much trickier, so *2 like other places is good. */
- code_size *= 2;
-#endif
-
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = (unsigned char *)mono_realloc_native_code (cfg);
guint32 target_pos;
/* The SSE opcodes require a 16 byte alignment */
-#if defined(__default_codegen__)
code = (guint8*)ALIGN_TO (code, 16);
-#elif defined(__native_client_codegen__)
- {
- /* Pad this out with HLT instructions */
- /* or we can get garbage bytes emitted */
- /* which will fail validation */
- guint8 *aligned_code;
- /* extra align to make room for */
- /* mov/push below */
- int extra_align = patch_info->type == MONO_PATCH_INFO_R8 ? 2 : 1;
- aligned_code = (guint8*)ALIGN_TO (code + extra_align, 16);
- /* The technique of hiding data in an */
- /* instruction has a problem here: we */
- /* need the data aligned to a 16-byte */
- /* boundary but the instruction cannot */
- /* cross the bundle boundary. so only */
- /* odd multiples of 16 can be used */
- if ((intptr_t)aligned_code % kNaClAlignment == 0) {
- aligned_code += 16;
- }
- while (code < aligned_code) {
- *(code++) = 0xf4; /* hlt */
- }
- }
-#endif
pos = cfg->native_code + patch_info->ip.i;
if (IS_REX (pos [1])) {
}
if (patch_info->type == MONO_PATCH_INFO_R8) {
-#ifdef __native_client_codegen__
- /* Hide 64-bit data in a */
- /* "mov imm64, r11" instruction. */
- /* write it before the start of */
- /* the data*/
- *(code-2) = 0x49; /* prefix */
- *(code-1) = 0xbb; /* mov X, %r11 */
-#endif
*(double*)code = *(double*)patch_info->data.target;
code += sizeof (double);
} else {
-#ifdef __native_client_codegen__
- /* Hide 32-bit data in a */
- /* "push imm32" instruction. */
- *(code-1) = 0x68; /* push */
-#endif
*(float*)code = *(float*)patch_info->data.target;
code += sizeof (float);
}
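The 16-byte alignment applied above to the inlined SSE constants is the usual power-of-two round-up (the same arithmetic as the ALIGN_TO macro seen further down); as a standalone helper, under the assumption that align is a power of two:

#include <stdint.h>

/* Round val up to the next multiple of align; align must be a power of two. */
static inline uintptr_t
align_to (uintptr_t val, uintptr_t align)
{
	return (val + align - 1) & ~(align - 1);
}
/* e.g. align_to (0x1009, 16) == 0x1010 */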
return TRUE;
}
-#if defined(__native_client_codegen__)
-/* For membase calls, we want the base register. for Native Client, */
-/* all indirect calls have the following sequence with the given sizes: */
-/* mov %eXX,%eXX [2-3] */
-/* mov disp(%r15,%rXX,scale),%r11d [4-8] */
-/* and $0xffffffffffffffe0,%r11d [4] */
-/* add %r15,%r11 [3] */
-/* callq *%r11 [3] */
-
-
-/* Determine if code points to a NaCl call-through-register sequence, */
-/* (i.e., the last 3 instructions listed above) */
-int
-is_nacl_call_reg_sequence(guint8* code)
-{
- const char *sequence = "\x41\x83\xe3\xe0" /* and */
- "\x4d\x03\xdf" /* add */
- "\x41\xff\xd3"; /* call */
- return memcmp(code, sequence, 10) == 0;
-}
-
-/* Determine if code points to the first opcode of the mov membase component */
-/* of an indirect call sequence (i.e. the first 2 instructions listed above) */
-/* (there could be a REX prefix before the opcode but it is ignored) */
-static int
-is_nacl_indirect_call_membase_sequence(guint8* code)
-{
- /* Check for mov opcode, reg-reg addressing mode (mod = 3), */
- return code[0] == 0x8b && amd64_modrm_mod(code[1]) == 3 &&
- /* and that src reg = dest reg */
- amd64_modrm_reg(code[1]) == amd64_modrm_rm(code[1]) &&
- /* Check that next inst is mov, uses SIB byte (rm = 4), */
- IS_REX(code[2]) &&
- code[3] == 0x8b && amd64_modrm_rm(code[4]) == 4 &&
- /* and has dst of r11 and base of r15 */
- (amd64_modrm_reg(code[4]) + amd64_rex_r(code[2])) == AMD64_R11 &&
- (amd64_sib_base(code[5]) + amd64_rex_b(code[2])) == AMD64_R15;
-}
-#endif /* __native_client_codegen__ */
-
int
mono_arch_get_this_arg_reg (guint8 *code)
{
g_assert ((code - start) < 64);
}
- nacl_global_codeman_validate (&start, 64, &code);
mono_arch_flush_icache (start, code - start);
if (has_target) {
{
}
-#if defined(__default_codegen__)
#define CMP_SIZE (6 + 1)
#define CMP_REG_REG_SIZE (4 + 1)
#define BR_SMALL_SIZE 2
#define MOV_REG_IMM_SIZE 10
#define MOV_REG_IMM_32BIT_SIZE 6
#define JUMP_REG_SIZE (2 + 1)
-#elif defined(__native_client_codegen__)
-/* NaCl N-byte instructions can be padded up to N-1 bytes */
-#define CMP_SIZE ((6 + 1) * 2 - 1)
-#define CMP_REG_REG_SIZE ((4 + 1) * 2 - 1)
-#define BR_SMALL_SIZE (2 * 2 - 1)
-#define BR_LARGE_SIZE (6 * 2 - 1)
-#define MOV_REG_IMM_SIZE (10 * 2 - 1)
-#define MOV_REG_IMM_32BIT_SIZE (6 * 2 - 1)
-/* Jump reg for NaCl adds a mask (+4) and add (+3) */
-#define JUMP_REG_SIZE ((2 + 1 + 4 + 3) * 2 - 1)
-/* Jump membase's size is large and unpredictable */
-/* in native client, just pad it out a whole bundle. */
-#define JUMP_MEMBASE_SIZE (kNaClAlignment)
-#endif
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE;
-#ifdef __native_client_codegen__
- item->chunk_size += JUMP_MEMBASE_SIZE;
-#endif
}
item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE;
} else {
/* with assert below:
* item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
*/
-#ifdef __native_client_codegen__
- item->chunk_size += JUMP_MEMBASE_SIZE;
-#endif
}
}
} else {
}
size += item->chunk_size;
}
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* In Native Client, we don't re-use thunks, allocate from the */
- /* normal code manager paths. */
- code = mono_domain_code_reserve (domain, size);
-#else
if (fail_tramp)
code = (guint8 *)mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = (guint8 *)mono_domain_code_reserve (domain, size);
-#endif
start = code;
unwind_ops = mono_arch_get_cie_program ();
mono_stats.imt_thunks_size += code - start;
g_assert (code - start <= size);
- nacl_domain_code_validate(domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
#include <mono/utils/mono-context.h>
#include <glib.h>
-#ifdef __native_client_codegen__
-#define kNaClAlignmentAMD64 32
-#define kNaClAlignmentMaskAMD64 (kNaClAlignmentAMD64 - 1)
-
-/* TODO: use kamd64NaClLengthOfCallImm */
-/* temporarily using kNaClAlignmentAMD64 so padding in */
-/* image-writer.c doesn't happen */
-#define kNaClLengthOfCallImm kNaClAlignmentAMD64
-
-int is_nacl_call_reg_sequence (guint8* code);
-void amd64_nacl_clear_legacy_prefix_tag ();
-void amd64_nacl_tag_legacy_prefix (guint8* code);
-void amd64_nacl_tag_rex (guint8* code);
-guint8* amd64_nacl_get_legacy_prefix_tag ();
-guint8* amd64_nacl_get_rex_tag ();
-void amd64_nacl_instruction_pre ();
-void amd64_nacl_instruction_post (guint8 **start, guint8 **end);
-void amd64_nacl_membase_handler (guint8** code, gint8 basereg, gint32 offset, gint8 dreg);
-#endif
-
#ifdef HOST_WIN32
#include <windows.h>
/* use SIG* defines if possible */
* the 'rbp' field is not valid.
*/
gpointer previous_lmf;
-#if defined(__default_codegen__) || defined(HOST_WIN32)
guint64 rip;
-#elif defined(__native_client_codegen__)
- /* On 64-bit compilers, default alignment is 8 for this field, */
- /* this allows the structure to match for 32-bit compilers. */
- guint64 rip __attribute__ ((aligned(8)));
-#endif
guint64 rbp;
guint64 rsp;
};
*/
#define MONO_ARCH_VARARG_ICALLS 1
-#if (!defined( HOST_WIN32 ) && !defined(__native_client__) && !defined(__native_client_codegen__)) && defined (HAVE_SIGACTION)
+#if !defined( HOST_WIN32 ) && defined (HAVE_SIGACTION)
#define MONO_ARCH_USE_SIGACTION 1
#endif
-#endif /* !HOST_WIN32 && !__native_client__ */
+#endif /* !HOST_WIN32 */
#if !defined(__linux__)
#define MONO_ARCH_NOMAP32BIT 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_AOT_SUPPORTED 1
-#if !defined( __native_client__ )
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
-#endif
#define MONO_ARCH_SUPPORT_TASKLETS 1
void
mono_arch_flush_icache (guint8 *code, gint size)
{
-#if defined(MONO_CROSS_COMPILE) || defined(__native_client__)
- // For Native Client we don't have to flush i-cache here,
- // as it's being done by dyncode interface.
+#if defined(MONO_CROSS_COMPILE)
#elif __APPLE__
sys_icache_invalidate (code, size);
#else
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
case OP_GC_SAFE_POINT: {
- const char *polling_func = NULL;
guint8 *buf [1];
g_assert (mono_threads_is_coop_enabled ());
- polling_func = "mono_threads_state_poll";
ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
buf [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func);
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_threads_state_poll");
code = emit_call_seq (cfg, code);
arm_patch (buf [0], code);
break;
#include <mono/utils/mono-context.h>
#include <glib.h>
-#ifdef __native_client_codegen__
-#define kNaClAlignmentARM 16
-#define kNaClAlignmentMaskARM (kNaClAlignmentARM - 1)
-#define kNaClLengthOfCallImm 4
-#endif
-
#if defined(ARM_FPU_NONE)
#define MONO_ARCH_SOFT_FLOAT_FALLBACK 1
#endif
#define MONO_ARCH_USE_SIGACTION 1
-#if defined(__native_client__) || defined(HOST_WATCHOS)
+#if defined(HOST_WATCHOS)
#undef MONO_ARCH_USE_SIGACTION
#endif
#define MONO_ARCH_HAVE_PATCH_CODE_NEW 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
-#if defined(__native_client__)
-#undef MONO_ARCH_SOFT_DEBUG_SUPPORTED
-#undef MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX
-#undef MONO_ARCH_HAVE_CONTEXT_SET_INT_REG
-#endif
-
#define MONO_ARCH_HAVE_TLS_GET (mono_arm_have_tls_get ())
#define MONO_ARCH_HAVE_TLS_GET_REG 1
__nacl_suspend_thread_if_needed();
#endif
}
-
-/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
- * which we are generating code, return a pointer to the destination in the
- * dynamic code segment into which the code will be copied when
- * mono_global_codeman_commit is called.
- * LOCKING: Acquires the jit lock.
- */
-void*
-nacl_global_codeman_get_dest (void *data)
-{
- void *dest;
- mono_jit_lock ();
- dest = nacl_code_manager_get_code_dest (global_codeman, data);
- mono_jit_unlock ();
- return dest;
-}
-
-void
-mono_global_codeman_commit (void *data, int size, int newsize)
-{
- mono_jit_lock ();
- mono_code_manager_commit (global_codeman, data, size, newsize);
- mono_jit_unlock ();
-}
-
-/*
- * Convenience function which calls mono_global_codeman_commit to validate and
- * copy the code. The caller sets *buf_base and *buf_size to the start and size
- * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to
- * the byte after the last instruction byte. On return, *buf_base will point to
- * the start of the copied in the code segment, and *code_end will point after
- * the end of the copied code.
- */
-void
-nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
-{
- guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);
- mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
- *code_end = tmp + (*code_end - *buf_base);
- *buf_base = tmp;
-}
-#else
-/* no-op versions of Native Client functions */
-void*
-nacl_global_codeman_get_dest (void *data)
-{
- return data;
-}
-
-void
-mono_global_codeman_commit (void *data, int size, int newsize)
-{
-}
-
-void
-nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
-{
-}
-
#endif /* __native_client__ */
/**
target = patch_info->data.inst->inst_c0 + code;
break;
case MONO_PATCH_INFO_IP:
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* Need to transform to the destination address, it's */
- /* emitted as an immediate in the code. */
- target = nacl_inverse_modify_patch_target(ip);
-#else
target = ip;
-#endif
break;
case MONO_PATCH_INFO_METHOD_REL:
target = code + patch_info->data.offset;
target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE, error);
if (!mono_error_ok (error))
return NULL;
-#if defined(__native_client__) && defined(__native_client_codegen__)
-# if defined(TARGET_AMD64)
- /* This target is an absolute address, not relative to the */
- /* current code being emitted on AMD64. */
- target = nacl_inverse_modify_patch_target(target);
-# endif
-#endif
break;
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
#endif
for (i = 0; i < patch_info->data.table->table_size; i++) {
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* 'code' is relative to the current code blob, we */
- /* need to do this transform on it to make the */
- /* pointers in this table absolute */
- jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
-#else
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
-#endif
}
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* jump_table is in the data section, we need to transform */
- /* it here so when it gets modified in amd64_patch it will */
- /* then point back to the absolute data address */
- target = nacl_inverse_modify_patch_target (jump_table);
-#else
target = jump_table;
-#endif
break;
}
case MONO_PATCH_INFO_METHODCONST:
register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, "mono_lconv_to_r8_un", FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_FREM
-#if defined(__default_codegen__)
+#if !defined(__native_client__)
register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", fmod, "fmod", FALSE);
register_opcode_emulation (OP_RREM, "__emul_rrem", "float float float", fmodf, "fmodf", FALSE);
-#elif defined(__native_client_codegen__)
+#else
register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, "mono_fmod", FALSE);
#endif
#endif
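Where MONO_ARCH_EMULATE_FREM is defined, OP_FREM/OP_RREM fall back to the C library remainder functions registered above; mono_fmod on the remaining NaCl path is assumed to be a wrapper with the same semantics. In plain C the emulation amounts to:

#include <math.h>

/* Remainder with the sign of the dividend, matching what the registered
   fmod/fmodf emulation returns for OP_FREM and OP_RREM. */
static double emul_frem (double a, double b) { return fmod (a, b); }
static float  emul_rrem (float a, float b)   { return fmodf (a, b); }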
static guint8*
emit_load_aotconst (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji, int dreg, int tramp_type, gconstpointer target);
-#ifdef __native_client_codegen__
-
-/* Default alignment for Native Client is 32-byte. */
-gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
-
-/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
-/* Check that alignment doesn't cross an alignment boundary. */
-guint8 *
-mono_arch_nacl_pad (guint8 *code, int pad)
-{
- const int kMaxPadding = 7; /* see x86-codegen.h: x86_padding() */
-
- if (pad == 0) return code;
- /* assertion: alignment cannot cross a block boundary */
- g_assert(((uintptr_t)code & (~kNaClAlignmentMask)) ==
- (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
- while (pad >= kMaxPadding) {
- x86_padding (code, kMaxPadding);
- pad -= kMaxPadding;
- }
- if (pad != 0) x86_padding (code, pad);
- return code;
-}
-
-guint8 *
-mono_arch_nacl_skip_nops (guint8 *code)
-{
- x86_skip_nops (code);
- return code;
-}
-
-#endif /* __native_client_codegen__ */
-
const char*
mono_arch_regname (int reg)
{
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
-#if !defined(__native_client__)
guint32 opts = 0;
*exclude_mask = 0;
#endif
return opts;
-#else
- return MONO_OPT_CMOV | MONO_OPT_FCMOV | MONO_OPT_SSE2;
-#endif
}
/*
This is required for code patching to be safe on SMP machines.
*/
pad_size = (guint32)(code + 1 - cfg->native_code) & 0x3;
-#ifndef __native_client_codegen__
if (needs_paddings && pad_size)
x86_padding (code, 4 - pad_size);
-#endif
mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
x86_call_code (code, 0);
bb->native_offset = cfg->code_len;
}
}
-#ifdef __native_client_codegen__
- {
- /* For Native Client, all indirect call/jump targets must be */
- /* 32-byte aligned. Exception handler blocks are jumped to */
- /* indirectly as well. */
- gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
- (bb->flags & BB_EXCEPTION_HANDLER);
-
- /* if ((cfg->code_len & kNaClAlignmentMask) != 0) { */
- if ( bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
- int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
- if (pad != kNaClAlignment) code = mono_arch_nacl_pad(code, pad);
- cfg->code_len += pad;
- bb->native_offset = cfg->code_len;
- }
- }
-#endif /* __native_client_codegen__ */
+
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
-#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+#define EXTRA_CODE_SPACE (16)
if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
cfg->code_size *= 2;
break;
case OP_IDIV:
case OP_IREM:
-#if defined( __native_client_codegen__ )
- x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
/*
* The code is the same for div/rem, the allocator will allocate dreg
* to RAX/RDX as appropriate.
break;
case OP_IDIV_UN:
case OP_IREM_UN:
-#if defined( __native_client_codegen__ )
- x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
if (ins->sreg2 == X86_EDX) {
x86_push_reg (code, ins->sreg2);
x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
}
break;
case OP_DIV_IMM:
-#if defined( __native_client_codegen__ )
- if (ins->inst_imm == 0) {
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "DivideByZeroException");
- x86_jump32 (code, 0);
- break;
- }
-#endif
x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
x86_cdq (code);
x86_div_reg (code, ins->sreg2, TRUE);
break;
}
case OP_GC_SAFE_POINT: {
- const char *polling_func = NULL;
- int compare_val = 0;
guint8 *br [1];
-#if defined(__native_client_codegen__) && defined(__native_client_gc__)
- polling_func = "mono_nacl_gc";
- compare_val = 0xFFFFFFFF;
-#else
g_assert (mono_threads_is_coop_enabled ());
- polling_func = "mono_threads_state_poll";
- compare_val = 1;
-#endif
- x86_test_membase_imm (code, ins->sreg1, 0, compare_val);
+ x86_test_membase_imm (code, ins->sreg1, 0, 1);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_threads_state_poll");
x86_patch (br [0], code);
break;
}
if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
-#ifndef __native_client_codegen__
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
-#endif /* __native_client_codegen__ */
}
cpos += max_len;
case MONO_PATCH_INFO_LABEL:
case MONO_PATCH_INFO_RGCTX_FETCH:
case MONO_PATCH_INFO_JIT_ICALL_ADDR:
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (nacl_is_code_address (code)) {
- /* For tail calls, code is patched after being installed */
- /* but not through the normal "patch callsite" method. */
- unsigned char buf[kNaClAlignment];
- unsigned char *aligned_code = (uintptr_t)code & ~kNaClAlignmentMask;
- unsigned char *_target = target;
- int ret;
- /* All patch targets modified in x86_patch */
- /* are IP relative. */
- _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
- memcpy (buf, aligned_code, kNaClAlignment);
- /* Patch a temp buffer of bundle size, */
- /* then install to actual location. */
- x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
- ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
- g_assert (ret == 0);
- }
- else {
- x86_patch (ip, (unsigned char*)target);
- }
-#else
x86_patch (ip, (unsigned char*)target);
-#endif
break;
case MONO_PATCH_INFO_NONE:
break;
}
default: {
guint32 offset = mono_arch_get_patch_offset (ip);
-#if !defined(__native_client__)
*((gconstpointer *)(ip + offset)) = target;
-#else
- *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
-#endif
break;
}
}
int alloc_size, pos, max_offset, i, cfa_offset;
guint8 *code;
gboolean need_stack_frame;
-#ifdef __native_client_codegen__
- guint alignment_check;
-#endif
cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
-#if defined(__default_codegen__)
code = cfg->native_code = g_malloc (cfg->code_size);
-#elif defined(__native_client_codegen__)
- /* native_code_alloc is not 32-byte aligned, native_code is. */
- cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
- cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
-
- /* Align native_code to next nearest kNaclAlignment byte. */
- cfg->native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
- cfg->native_code = (guint)cfg->native_code & ~kNaClAlignmentMask;
-
- code = cfg->native_code;
-
- alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
- g_assert(alignment_check == 0);
-#endif
#if 0
{
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_offset += LOOP_ALIGNMENT;
-#ifdef __native_client_codegen__
- /* max alignment for native client */
- if (bb->flags & BB_INDIRECT_JUMP_TARGET || bb->flags & BB_EXCEPTION_HANDLER)
- max_offset += kNaClAlignment;
-#endif
MONO_BB_FOR_EACH_INS (bb, ins) {
if (ins->opcode == OP_LABEL)
ins->inst_c1 = max_offset;
-#ifdef __native_client_codegen__
- switch (ins->opcode)
- {
- case OP_FCALL:
- case OP_LCALL:
- case OP_VCALL:
- case OP_VCALL2:
- case OP_VOIDCALL:
- case OP_CALL:
- case OP_FCALL_REG:
- case OP_LCALL_REG:
- case OP_VCALL_REG:
- case OP_VCALL2_REG:
- case OP_VOIDCALL_REG:
- case OP_CALL_REG:
- case OP_FCALL_MEMBASE:
- case OP_LCALL_MEMBASE:
- case OP_VCALL_MEMBASE:
- case OP_VCALL2_MEMBASE:
- case OP_VOIDCALL_MEMBASE:
- case OP_CALL_MEMBASE:
- max_offset += kNaClAlignment;
- break;
- default:
- max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN] - 1;
- break;
- }
-#endif /* __native_client_codegen__ */
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
}
guint32 size;
/* Compute size of code following the push <OFFSET> */
-#if defined(__default_codegen__)
size = 5 + 5;
-#elif defined(__native_client_codegen__)
- code = mono_nacl_align (code);
- size = kNaClAlignment;
-#endif
+
/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
if ((code - cfg->native_code) - throw_ip < 126 - size) {
//[1 + 5] x86_jump_mem(inst,mem)
#define CMP_SIZE 6
-#if defined(__default_codegen__)
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
-#elif defined(__native_client_codegen__)
-/* I suspect the size calculation below is actually incorrect. */
-/* TODO: fix the calculation that uses these sizes. */
-#define BR_SMALL_SIZE 16
-#define BR_LARGE_SIZE 12
-#endif /*__native_client_codegen__*/
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0
}
size += item->chunk_size;
}
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* In Native Client, we don't re-use thunks, allocate from the */
- /* normal code manager paths. */
- size = NACL_BUNDLE_ALIGN_UP (size);
- code = mono_domain_code_reserve (domain, size);
-#else
if (fail_tramp)
code = mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = mono_domain_code_reserve (domain, size);
-#endif
start = code;
unwind_ops = mono_arch_get_cie_program ();
g_free (buff);
}
- nacl_domain_code_validate (domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
} else {
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
-#ifdef __native_client_codegen__
- /* TODO: calculate this size correctly */
- code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
-#else
code_reserve = 8 + (param_count * 8);
-#endif /* __native_client_codegen__ */
/*
* The stack contains:
* <args in reverse order>
g_assert ((code - start) < code_reserve);
}
- nacl_global_codeman_validate (&start, code_reserve, &code);
-
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
} else {
#include <mono/utils/mono-sigcontext.h>
#include <mono/utils/mono-context.h>
-#ifdef __native_client_codegen__
-#define kNaClAlignmentX86 32
-#define kNaClAlignmentMaskX86 (kNaClAlignmentX86 - 1)
-
-#define kNaClLengthOfCallImm kx86NaClLengthOfCallImm
-#endif
-
#ifdef HOST_WIN32
#include <windows.h>
/* use SIG* defines if possible */
#define MONO_ARCH_USE_SIGACTION
#endif
-#if defined(__native_client__) || defined(HOST_WATCHOS)
+#if defined(HOST_WATCHOS)
#undef MONO_ARCH_USE_SIGACTION
#endif
#define MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK 1
#define MONO_ARCH_HAVE_LIVERANGE_OPS 1
#define MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX 1
-#if !defined(__native_client_codegen__)
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
-#endif
#define MONO_ARCH_GOT_REG X86_EBX
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
gpointer
mono_realloc_native_code (MonoCompile *cfg)
{
-#if defined(__default_codegen__)
return g_realloc (cfg->native_code, cfg->code_size);
-#elif defined(__native_client_codegen__)
- guint old_padding;
- gpointer native_code;
- guint alignment_check;
-
- /* Save the old alignment offset so we can re-align after the realloc. */
- old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
- cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
-
- cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc,
- cfg->code_size + kNaClAlignment );
-
- /* Align native_code to next nearest kNaClAlignment byte. */
- native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
- native_code = (guint)native_code & ~kNaClAlignmentMask;
-
- /* Shift the data to be 32-byte aligned again. */
- memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
-
- alignment_check = (guint)native_code & kNaClAlignmentMask;
- g_assert (alignment_check == 0);
- return native_code;
-#else
- g_assert_not_reached ();
- return cfg->native_code;
-#endif
-}
-
-#ifdef __native_client_codegen__
-
-/* Prevent instructions from straddling a 32-byte alignment boundary. */
-/* Instructions longer than 32 bytes must be aligned internally. */
-/* IN: pcode, instlen */
-/* OUT: pcode */
-void mono_nacl_align_inst(guint8 **pcode, int instlen) {
- int space_in_block;
-
- space_in_block = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask);
-
- if (G_UNLIKELY (instlen >= kNaClAlignment)) {
- g_assert_not_reached();
- } else if (instlen > space_in_block) {
- *pcode = mono_arch_nacl_pad(*pcode, space_in_block);
- }
}
-/* Move emitted call sequence to the end of a kNaClAlignment-byte block. */
-/* IN: start pointer to start of call sequence */
-/* IN: pcode pointer to end of call sequence (current "IP") */
-/* OUT: start pointer to the start of the call sequence after padding */
-/* OUT: pcode pointer to the end of the call sequence after padding */
-void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
- const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
- guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
- guint8 *temp;
-
- const size_t length = (size_t)((*pcode)-(*start));
- g_assert(length < MAX_NACL_CALL_LENGTH);
-
- memcpy(copy_of_call, *start, length);
- temp = mono_nacl_pad_call(*start, (guint8)length);
- memcpy(temp, copy_of_call, length);
- (*start) = temp;
- (*pcode) = temp + length;
-}
-
-/* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
-/* code pointer to buffer for emitting code */
-/* ilength length of call instruction */
-guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength) {
- int freeSpaceInBlock = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
- int padding = freeSpaceInBlock - ilength;
-
- if (padding < 0) {
- /* There isn't enough space in this block for the instruction. */
- /* Fill this block and start a new one. */
- code = mono_arch_nacl_pad(code, freeSpaceInBlock);
- freeSpaceInBlock = kNaClAlignment;
- padding = freeSpaceInBlock - ilength;
- }
- g_assert(ilength > 0);
- g_assert(padding >= 0);
- g_assert(padding < kNaClAlignment);
- if (0 == padding) return code;
- return mono_arch_nacl_pad(code, padding);
-}
-
-guint8 *mono_nacl_align(guint8 *code) {
- int padding = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
- if (padding != kNaClAlignment) code = mono_arch_nacl_pad(code, padding);
- return code;
-}
-
-void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji)
-{
- MonoJumpInfo *patch_info;
- for (patch_info = ji; patch_info; patch_info = patch_info->next) {
- unsigned char *ip = patch_info->ip.i + code;
- ip = mono_arch_nacl_skip_nops(ip);
- patch_info->ip.i = ip - code;
- }
-}
-#endif /* __native_client_codegen__ */
-
typedef struct {
MonoExceptionClause *clause;
MonoBasicBlock *basic_block;
MonoJumpList *jlist;
MonoDomain *domain = cfg->domain;
unsigned char *ip = cfg->native_code + patch_info->ip.i;
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* When this jump target gets evaluated, the method */
- /* will be installed in the dynamic code section, */
- /* not at the location of cfg->native_code. */
- ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
-#endif
mono_domain_lock (domain);
jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
else
code_domain = cfg->domain;
-#if defined(__native_client_codegen__) && defined(__native_client__)
- void *code_dest;
-
- /* This keeps patch targets from being transformed during
- * ordinary method compilation, for local branches and jumps.
- */
- nacl_allow_target_modification (FALSE);
-#endif
-
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* we reuse dfn here */
}
}
-#ifdef __native_client_codegen__
- mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
-#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
/* we always allocate code in cfg->domain->code_mp to increase locality */
cfg->code_size = cfg->code_len + max_epilog_size;
-#ifdef __native_client_codegen__
- cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
-#endif
+
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
} else {
code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
- nacl_allow_target_modification (TRUE);
-#endif
+
if (cfg->thunk_area) {
cfg->thunks_offset = cfg->code_size + unwindlen;
cfg->thunks = code + cfg->thunks_offset;
g_assert (code);
memcpy (code, cfg->native_code, cfg->code_len);
-#if defined(__default_codegen__)
g_free (cfg->native_code);
-#elif defined(__native_client_codegen__)
- if (cfg->native_code_alloc) {
- g_free (cfg->native_code_alloc);
- cfg->native_code_alloc = 0;
- }
- else if (cfg->native_code) {
- g_free (cfg->native_code);
- }
-#endif /* __native_client_codegen__ */
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
mono_arch_save_unwind_info (cfg);
#endif
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (!cfg->compile_aot) {
- if (cfg->method->dynamic) {
- code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
- } else {
- code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
- }
- }
-#endif
-
-#if defined(__native_client_codegen__)
- mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
-#endif
-
#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
{
MonoJumpInfo *ji;
patch_info.data.method = method;
g_hash_table_remove (domain_jit_info (target_domain)->jump_target_hash, method);
-#if defined(__native_client_codegen__) && defined(__native_client__)
- /* These patches are applied after a method has been installed, no target munging is needed. */
- nacl_allow_target_modification (FALSE);
-#endif
#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
for (tmp = jlist->list; tmp; tmp = tmp->next) {
gpointer target = mono_resolve_patch_target (NULL, target_domain, (guint8 *)tmp->data, &patch_info, TRUE, error);
#else
for (tmp = jlist->list; tmp; tmp = tmp->next)
mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE);
-#endif
-#if defined(__native_client_codegen__) && defined(__native_client__)
- nacl_allow_target_modification (TRUE);
#endif
}
}
-/* Native Client functions */
gpointer mono_realloc_native_code(MonoCompile *cfg);
-#ifdef __native_client_codegen__
-void mono_nacl_align_inst(guint8 **pcode, int instlen);
-void mono_nacl_align_call(guint8 **start, guint8 **pcode);
-guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength);
-guint8 *mono_nacl_align(guint8 *code);
-void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji);
-/* Defined for each arch */
-guint8 *mono_arch_nacl_pad(guint8 *code, int pad);
-guint8 *mono_arch_nacl_skip_nops(guint8 *code);
-
-#if defined(TARGET_X86)
-#define kNaClAlignment kNaClAlignmentX86
-#define kNaClAlignmentMask kNaClAlignmentMaskX86
-#elif defined(TARGET_AMD64)
-#define kNaClAlignment kNaClAlignmentAMD64
-#define kNaClAlignmentMask kNaClAlignmentMaskAMD64
-#elif defined(TARGET_ARM)
-#define kNaClAlignment kNaClAlignmentARM
-#define kNaClAlignmentMask kNaClAlignmentMaskARM
-#endif
-
-#define NACL_BUNDLE_ALIGN_UP(p) ((((p)+kNaClAlignmentMask)) & ~kNaClAlignmentMask)
-#endif
#if defined(__native_client__) || defined(__native_client_codegen__)
extern volatile int __nacl_thread_suspension_needed;
void mono_nacl_gc(void);
#endif
-#if defined(__native_client_codegen__) || defined(__native_client__)
-#define NACL_SIZE(a, b) (b)
-#else
-#define NACL_SIZE(a, b) (a)
-#endif
-
extern MonoDebugOptions debug_options;
static inline MonoMethod*
gboolean mono_running_on_valgrind (void);
void* mono_global_codeman_reserve (int size);
-void* nacl_global_codeman_get_dest(void *data);
-void mono_global_codeman_commit(void *data, int size, int newsize);
-void nacl_global_codeman_validate(guint8 **buf_base, int buf_size, guint8 **code_end);
const char *mono_regname_full (int reg, int bank);
gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align);
void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb);
amd64_jump_code (code, addr);
g_assert ((code - start) < buf_len);
- nacl_domain_code_validate (domain, &start, buf_len, &code);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
#include "mini-amd64.h"
#include "debugger-agent.h"
-#if defined(__native_client_codegen__) && defined(__native_client__)
-#include <malloc.h>
-#include <nacl/nacl_dyncode.h>
-#endif
-
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
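/* ALIGN_TO rounds val up to the next multiple of align; align must be a
 * power of two for the mask to be valid, e.g. ALIGN_TO (13, 8) == 16 and
 * ALIGN_TO (16, 8) == 16. */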
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
{
guint8 *code, *start;
GSList *unwind_ops;
- int this_reg, size = NACL_SIZE (20, 32);
+ int this_reg, size = 20;
MonoDomain *domain = mono_domain_get ();
amd64_jump_reg (code, AMD64_RAX);
g_assert ((code - start) < size);
- nacl_domain_code_validate (domain, &start, size, &code);
-
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);
#else
/* AOTed code could still have a non-32 bit address */
if ((((guint64)addr) >> 32) == 0)
- buf_len = NACL_SIZE (16, 32);
+ buf_len = 16;
else
- buf_len = NACL_SIZE (30, 32);
+ buf_len = 30;
#endif
start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);
amd64_jump_code (code, addr);
g_assert ((code - start) < buf_len);
- nacl_domain_code_validate (domain, &start, buf_len, &code);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
-#if defined(__default_codegen__)
guint8 *code;
guint8 buf [16];
gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
}
}
-#elif defined(__native_client__)
- /* These are essentially the same 2 cases as above, modified for NaCl*/
-
- /* Target must be bundle-aligned */
- g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
- /* Return target must be bundle-aligned */
- g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);
-
- if (orig_code[-5] == 0xe8) {
- /* Direct call */
- int ret;
- gint32 offset = (gint32)addr - (gint32)orig_code;
- guint8 buf[sizeof(gint32)];
- *((gint32*)(buf)) = offset;
- ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
- g_assert (ret == 0);
- }
-
- else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code[-16] == 0x41 && orig_code[-15] == 0xbb) {
- int ret;
- guint8 buf[sizeof(gint32)];
- *((gint32 *)(buf)) = addr;
- /* orig_code[-14] is the start of the immediate. */
- ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof(gint32));
- g_assert (ret == 0);
- }
- else {
- g_assert_not_reached ();
- }
-
- return;
-#endif
}
guint8*
gint32 disp;
gpointer *plt_jump_table_entry;
-#if defined(__default_codegen__)
/* A PLT entry: jmp *<DISP>(%rip) */
g_assert (code [0] == 0xff);
g_assert (code [1] == 0x25);
disp = *(gint32*)(code + 2);
plt_jump_table_entry = (gpointer*)(code + 6 + disp);
-#elif defined(__native_client_codegen__)
- /* A PLT entry: */
- /* mov <DISP>(%rip), %r11d */
- /* nacljmp *%r11 */
-
- /* Verify the 'mov' */
- g_assert (code [0] == 0x45);
- g_assert (code [1] == 0x8b);
- g_assert (code [2] == 0x1d);
-
- disp = *(gint32*)(code + 3);
-
- /* 7 = 3 (mov opcode) + 4 (disp) */
- /* This needs to resolve to the target of the RIP-relative offset */
- plt_jump_table_entry = (gpointer*)(code + 7 + disp);
-
-#endif /* __native_client_codegen__ */
InterlockedExchangePointer (plt_jump_table_entry, addr);
}
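/*
 * Note on the layout decoded above: the amd64 PLT entry is the 6-byte
 * sequence ff 25 <disp32>, i.e. jmp *<disp32>(%rip). The displacement is
 * relative to the end of the instruction, so the jump table slot the entry
 * dispatches through lives at code + 6 + disp, and swapping that slot with
 * InterlockedExchangePointer retargets the PLT entry atomically.
 */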
gboolean has_caller;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- const guint kMaxCodeSize = NACL_SIZE (630, 630*2);
-
-#if defined(__native_client_codegen__)
- const guint kNaClTrampOffset = 17;
-#endif
+ const guint kMaxCodeSize = 630;
if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
has_caller = FALSE;
/* Compute the trampoline address from the return address */
if (aot) {
-#if defined(__default_codegen__)
/* 7 = length of call *<offset>(rip) */
amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
-#elif defined(__native_client_codegen__)
- amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
-#endif
} else {
/* 5 = length of amd64_call_membase () */
amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
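/* In both branches R11 holds the return address pushed by the call inside
 * the specific trampoline; subtracting that call's length leaves the address
 * of the call itself, which is the reference point used below to locate the
 * encoded trampoline argument. */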
amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
/* Check that the stack is aligned */
-#if defined(__default_codegen__)
amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
}
mono_amd64_patch (br [0], code);
//amd64_breakpoint (code);
-#endif
if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
/* Obtain the trampoline argument which is encoded in the instruction stream */
if (aot) {
/* Load the GOT offset */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
-#if defined(__default_codegen__)
/*
* r11 points to a call *<offset>(%rip) instruction; load the
* pc-relative offset from the instruction itself.
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
/* 7 is the length of the call, 8 is the offset to the next GOT slot */
amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
-#elif defined(__native_client_codegen__)
- /* The arg is hidden in a "push imm32" instruction, */
- /* add one to skip the opcode. */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset+1, 4);
-#endif
/* Compute the address of the GOT slot */
amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
/* Load the value */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
} else {
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
-#if defined(__default_codegen__)
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
mono_amd64_patch (br [0], code);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
mono_amd64_patch (br [1], code);
-#elif defined(__native_client_codegen__)
- /* All args are 32-bit pointers in NaCl */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
-#endif
}
amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
} else {
g_assert ((code - buf) <= kMaxCodeSize);
- nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);
-
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
tramp = mono_get_trampoline_code (tramp_type);
-#if defined(__default_codegen__)
if ((((guint64)arg1) >> 32) == 0)
size = 5 + 1 + 4;
else
size += 16;
code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
}
-#elif defined(__native_client_codegen__)
- size = 5 + 1 + 4;
- /* Aligning the call site below could */
- /* add up to kNaClAlignment-1 bytes */
- size += (kNaClAlignment-1);
- size = NACL_BUNDLE_ALIGN_UP (size);
- buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
- code = buf;
-#endif
if (far_addr) {
amd64_mov_reg_imm (code, AMD64_R11, tramp);
amd64_call_code (code, tramp);
}
/* The trampoline code will obtain the argument from the instruction stream */
-#if defined(__default_codegen__)
if ((((guint64)arg1) >> 32) == 0) {
*code = 0x4;
*(guint32*)(code + 1) = (gint64)arg1;
*(guint64*)(code + 1) = (gint64)arg1;
code += 9;
}
-#elif defined(__native_client_codegen__)
- /* For NaCl, all tramp args are 32-bit because they're pointers */
- *code = 0x68; /* push imm32 */
- *(guint32*)(code + 1) = (gint32)arg1;
- code += 5;
-#endif
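/*
 * The marker byte and immediate written here are what the generic trampoline
 * parses back out of the instruction stream: it loads the byte at offset 5
 * from the call site, compares it against 4 to pick the argument width, and
 * then reads the 4- or 8-byte argument starting at offset 6.
 */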
g_assert ((code - buf) <= size);
if (code_len)
*code_len = size;
- nacl_domain_code_validate(domain, &buf, size, &code);
-
mono_arch_flush_icache (buf, size);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));
index -= size - 1;
}
- tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);
+ tramp_size = 64 + 8 * depth;
code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
amd64_jump_code (code, tramp);
}
- nacl_global_codeman_validate (&buf, tramp_size, &code);
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
-#if defined(__native_client__) || defined(__native_client_codegen__)
- /* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
- /* See aot-compiler.c arch_emit_plt_entry for details. */
- return *(guint32*)(plt_entry + 18);
-#else
return *(guint32*)(plt_entry + 6);
-#endif
}
/*
g_assert ((code - buf) <= buf_len);
- nacl_domain_code_validate (domain, &buf, buf_len, &code);
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
guint8 *code, *start;
- int this_pos = 4, size = NACL_SIZE(16, 32);
+ int this_pos = 4, size = 16;
MonoDomain *domain = mono_domain_get ();
GSList *unwind_ops;
x86_jump_code (code, addr);
g_assert ((code - start) < size);
- nacl_domain_code_validate (domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
MonoDomain *domain = mono_domain_get ();
- buf_len = NACL_SIZE (10, 32);
+ buf_len = 10;
start = code = mono_domain_code_reserve (domain, buf_len);
x86_jump_code (code, addr);
g_assert ((code - start) <= buf_len);
- nacl_domain_code_validate (domain, &start, buf_len, &code);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
-#if defined(__default_codegen__)
guint8 *code;
guint8 buf [8];
gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));
code [4], code [5], code [6]);
g_assert_not_reached ();
}
-#elif defined(__native_client__)
- /* Target must be bundle-aligned */
- g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
-
- /* 0xe8 = call <DISP>, 0xe9 = jump <DISP> */
- if ((orig_code [-5] == 0xe8) || orig_code [-6] == 0xe9) {
- int ret;
- gint32 offset = (gint32)addr - (gint32)orig_code;
- guint8 buf[sizeof(gint32)];
- *((gint32*)(buf)) = offset;
- ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
- g_assert (ret == 0);
- } else {
- printf ("Invalid trampoline sequence %p: %02x %02x %02x %02x %02x\n", orig_code, orig_code [-5], orig_code [-4], orig_code [-3], orig_code [-2], orig_code[-1]);
- g_assert_not_reached ();
- }
-#endif
}
void
/* Patch the jump table entry used by the plt entry */
-#if defined(__native_client_codegen__) || defined(__native_client__)
- /* for both compiler and runtime */
- /* A PLT entry: */
- /* mov <DISP>(%ebx), %ecx */
- /* and 0xffffffe0, %ecx */
- /* jmp *%ecx */
- g_assert (code [0] == 0x8b);
- g_assert (code [1] == 0x8b);
-
- offset = *(guint32*)(code + 2);
-#elif defined(__default_codegen__)
/* A PLT entry: jmp *<DISP>(%ebx) */
g_assert (code [0] == 0xff);
g_assert (code [1] == 0xa3);
offset = *(guint32*)(code + 2);
-#endif /* __native_client_codegen__ */
if (!got)
got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
*(guint8**)((guint8*)got + offset) = addr;
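/*
 * On x86 the PLT entry is ff a3 <disp32>, i.e. jmp *<disp32>(%ebx): the
 * displacement is an offset into the GOT whose base register is
 * MONO_ARCH_GOT_REG, so storing the new address at got + offset is enough
 * to retarget the entry.
 */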
static gpointer
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
- const int kBufSize = NACL_SIZE (8, 16);
+ const int kBufSize = 8;
guint8 buf [64];
guint8 reg = 0;
gint32 disp = 0;
if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
reg = code [1] & 0x07;
disp = *((gint32*)(code + 2));
-#if defined(__native_client_codegen__) || defined(__native_client__)
- } else if ((code[1] == 0x83) && (code[2] == 0xe1) && (code[4] == 0xff) &&
- (code[5] == 0xd1) && (code[-5] == 0x8b)) {
- disp = *((gint32*)(code - 3));
- reg = code[-4] & 0x07;
- } else if ((code[-2] == 0x8b) && (code[1] == 0x83) && (code[4] == 0xff)) {
- reg = code[-1] & 0x07;
- disp = (signed char)code[0];
-#endif
} else {
g_assert_not_reached ();
return NULL;
}
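/* The only encoding accepted above is an indirect call of the form
 * call *<disp32>(%reg) (opcode 0xff with ModRM mod == 10): the base register
 * is taken from the ModRM r/m bits and the 32-bit displacement follows the
 * opcode and ModRM bytes. */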
}
- nacl_global_codeman_validate (&buf, 256, &code);
g_assert ((code - buf) <= 256);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
tramp = mono_get_trampoline_code (tramp_type);
- code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, NACL_SIZE (4, kNaClAlignment));
+ code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, 4);
x86_push_imm (buf, arg1);
x86_jump_code (buf, tramp);
g_assert ((buf - code) <= TRAMPOLINE_SIZE);
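/* The x86 specific trampoline is simply push <imm32>; jmp <tramp>
 * (5 + 5 bytes), leaving the argument on the stack for the generic
 * trampoline to pick up. */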
- nacl_domain_code_validate (domain, &code, NACL_SIZE (4, kNaClAlignment), &buf);
-
mono_arch_flush_icache (code, buf - code);
mono_profiler_code_buffer_new (code, buf - code, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));
index -= size - 1;
}
-#if defined(__default_codegen__)
tramp_size = (aot ? 64 : 36) + 6 * depth;
-#elif defined(__native_client_codegen__)
- tramp_size = (aot ? 64 : 36) + 2 * kNaClAlignment +
- 6 * (depth + kNaClAlignment);
-#endif
code = buf = mono_global_codeman_reserve (tramp_size);
x86_jump_code (code, tramp);
}
- nacl_global_codeman_validate (&buf, tramp_size, &code);
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
x86_jump_reg (code, X86_EAX);
- nacl_global_codeman_validate (&buf, tramp_size, &code);
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
x86_jump_code (code, tramp);
- nacl_global_codeman_validate (&buf, tramp_size, &code);
-
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
g_assert (code - buf <= tramp_size);
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
- return *(guint32*)(plt_entry + NACL_SIZE (6, 12));
+ return *(guint32*)(plt_entry + 6);
}
/*
x86_jump_code (code, addr);
g_assert ((code - start) <= buf_len);
- nacl_domain_code_validate (domain, &start, buf_len, &code);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
#include <valgrind/memcheck.h>
#endif
-#if defined(__native_client_codegen__) && defined(__native_client__)
-#include <malloc.h>
-#include <nacl/nacl_dyncode.h>
-#include <mono/mini/mini.h>
-#endif
#include <mono/utils/mono-os-mutex.h>
#define MIN_ALIGN 16
#else
#define MIN_ALIGN 8
-#endif
-#ifdef __native_client_codegen__
-/* For Google Native Client, all targets of indirect control flow need to */
-/* be aligned to bundle boundary. 16 bytes on ARM, 32 bytes on x86.
- * MIN_ALIGN was updated to force alignment for calls from
- * tramp-<arch>.c to mono_global_codeman_reserve() */
-/* and mono_domain_code_reserve(). */
-#undef MIN_ALIGN
-#define MIN_ALIGN kNaClBundleSize
-
#endif
/* if a chunk has less than this amount of free space it's considered full */
CodeChunk *current;
CodeChunk *full;
CodeChunk *last;
-#if defined(__native_client_codegen__) && defined(__native_client__)
- GHashTable *hash;
-#endif
};
#define ALIGN_INT(val,alignment) (((val) + (alignment - 1)) & ~(alignment - 1))
-#if defined(__native_client_codegen__) && defined(__native_client__)
-/* End of text segment, set by linker.
- * Dynamic text starts on the next allocated page.
- */
-extern char etext[];
-char *next_dynamic_code_addr = NULL;
-
-/*
- * This routine gets the next available bundle aligned
- * pointer in the dynamic code section. It does not check
- * for the section end, this error will be caught in the
- * service runtime.
- */
-void*
-allocate_code(intptr_t increment)
-{
- char *addr;
- if (increment < 0) return NULL;
- increment = increment & kNaClBundleMask ? (increment & ~kNaClBundleMask) + kNaClBundleSize : increment;
- addr = next_dynamic_code_addr;
- next_dynamic_code_addr += increment;
- return addr;
-}
-
-int
-nacl_is_code_address (void *target)
-{
- return (char *)target < next_dynamic_code_addr;
-}
-
-/* Fill code buffer with arch-specific NOPs. */
-void
-mono_nacl_fill_code_buffer (guint8 *data, int size);
-
-#ifndef USE_JUMP_TABLES
-const int kMaxPatchDepth = 32;
-__thread unsigned char **patch_source_base = NULL;
-__thread unsigned char **patch_dest_base = NULL;
-__thread int *patch_alloc_size = NULL;
-__thread int patch_current_depth = -1;
-__thread int allow_target_modification = 1;
-
-static void
-nacl_jit_check_init ()
-{
- if (patch_source_base == NULL) {
- patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
- }
-}
-#endif
-
-void
-nacl_allow_target_modification (int val)
-{
-#ifndef USE_JUMP_TABLES
- allow_target_modification = val;
-#endif /* USE_JUMP_TABLES */
-}
-
-/* Given a patch target, modify the target such that patching will work when
- * the code is copied to the data section.
- */
-void*
-nacl_modify_patch_target (unsigned char *target)
-{
- /*
- * There's no need in patch tricks for jumptables,
- * as we always patch same jumptable.
- */
-#ifndef USE_JUMP_TABLES
- /* This seems like a bit of an ugly way to do this but the advantage
- * is we don't have to worry about all the conditions in
- * mono_resolve_patch_target, and it can be used by all the bare uses
- * of <arch>_patch.
- */
- unsigned char *sb;
- unsigned char *db;
-
- if (!allow_target_modification) return target;
-
- nacl_jit_check_init ();
- sb = patch_source_base[patch_current_depth];
- db = patch_dest_base[patch_current_depth];
-
- if (target >= sb && (target < sb + patch_alloc_size[patch_current_depth])) {
- /* Do nothing. target is in the section being generated.
- * no need to modify, the disp will be the same either way.
- */
- } else {
- int target_offset = target - db;
- target = sb + target_offset;
- }
-#endif
- return target;
-}
-
-void*
-nacl_inverse_modify_patch_target (unsigned char *target)
-{
- /*
- * There's no need in patch tricks for jumptables,
- * as we always patch same jumptable.
- */
-#ifndef USE_JUMP_TABLES
- unsigned char *sb;
- unsigned char *db;
- int target_offset;
-
- if (!allow_target_modification) return target;
-
- nacl_jit_check_init ();
- sb = patch_source_base[patch_current_depth];
- db = patch_dest_base[patch_current_depth];
-
- target_offset = target - sb;
- target = db + target_offset;
-#endif
- return target;
-}
-
-
-#endif /* __native_client_codegen && __native_client__ */
-
#define VALLOC_FREELIST_SIZE 16
static mono_mutex_t valloc_mutex;
MonoCodeManager*
mono_code_manager_new (void)
{
- MonoCodeManager *cman = (MonoCodeManager *) g_malloc0 (sizeof (MonoCodeManager));
- if (!cman)
- return NULL;
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (next_dynamic_code_addr == NULL) {
- const guint kPageMask = 0xFFFF; /* 64K pages */
- next_dynamic_code_addr = (uintptr_t)(etext + kPageMask) & ~kPageMask;
-#if defined (__GLIBC__)
- /* TODO: For now, just jump 64MB ahead to avoid dynamic libraries. */
- next_dynamic_code_addr += (uintptr_t)0x4000000;
-#else
- /* Workaround bug in service runtime, unable to allocate */
- /* from the first page in the dynamic code section. */
- next_dynamic_code_addr += (uintptr_t)0x10000;
-#endif
- }
- cman->hash = g_hash_table_new (NULL, NULL);
-# ifndef USE_JUMP_TABLES
- if (patch_source_base == NULL) {
- patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
- }
-# endif
-#endif
- return cman;
+ return (MonoCodeManager *) g_malloc0 (sizeof (MonoCodeManager));
}
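/*
 * Typical usage (an illustrative sketch, not part of this change): reserve a
 * generously sized buffer, emit code into it, then commit the size actually
 * used so the unused tail can be returned to the chunk:
 *
 *     MonoCodeManager *cman = mono_code_manager_new ();
 *     guint8 *buf = (guint8 *) mono_code_manager_reserve_align (cman, 256, 16);
 *     guint8 *code = buf;
 *     // ... emit instructions, advancing `code` ...
 *     mono_code_manager_commit (cman, buf, 256, code - buf);
 */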
/**
void*
mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
{
-#if !defined(__native_client__) || !defined(__native_client_codegen__)
CodeChunk *chunk, *prev;
void *ptr;
guint32 align_mask = alignment - 1;
ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
chunk->pos = ((char*)ptr - chunk->data) + size;
return ptr;
-#else
- unsigned char *temp_ptr, *code_ptr;
- /* Round up size to next bundle */
- alignment = kNaClBundleSize;
- size = (size + kNaClBundleSize) & (~kNaClBundleMask);
- /* Allocate a temp buffer */
- temp_ptr = memalign (alignment, size);
- g_assert (((uintptr_t)temp_ptr & kNaClBundleMask) == 0);
- /* Allocate code space from the service runtime */
- code_ptr = allocate_code (size);
- /* Insert pointer to code space in hash, keyed by buffer ptr */
- g_hash_table_insert (cman->hash, temp_ptr, code_ptr);
-
-#ifndef USE_JUMP_TABLES
- nacl_jit_check_init ();
-
- patch_current_depth++;
- patch_source_base[patch_current_depth] = temp_ptr;
- patch_dest_base[patch_current_depth] = code_ptr;
- patch_alloc_size[patch_current_depth] = size;
- g_assert (patch_current_depth < kMaxPatchDepth);
-#endif
-
- return temp_ptr;
-#endif
}
/**
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
-#if !defined(__native_client__) || !defined(__native_client_codegen__)
g_assert (newsize <= size);
if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
cman->current->pos -= size - newsize;
}
-#else
- unsigned char *code;
- int status;
- g_assert (NACL_BUNDLE_ALIGN_UP(newsize) <= size);
- code = g_hash_table_lookup (cman->hash, data);
- g_assert (code != NULL);
- mono_nacl_fill_code_buffer ((uint8_t*)data + newsize, size - newsize);
- newsize = NACL_BUNDLE_ALIGN_UP(newsize);
- g_assert ((GPOINTER_TO_UINT (data) & kNaClBundleMask) == 0);
- g_assert ((newsize & kNaClBundleMask) == 0);
- status = nacl_dyncode_create (code, data, newsize);
- if (status != 0) {
- unsigned char *codep;
- fprintf(stderr, "Error creating Native Client dynamic code section attempted to be\n"
- "emitted at %p (hex dissasembly of code follows):\n", code);
- for (codep = data; codep < data + newsize; codep++)
- fprintf(stderr, "%02x ", *codep);
- fprintf(stderr, "\n");
- g_assert_not_reached ();
- }
- g_hash_table_remove (cman->hash, data);
-# ifndef USE_JUMP_TABLES
- g_assert (data == patch_source_base[patch_current_depth]);
- g_assert (code == patch_dest_base[patch_current_depth]);
- patch_current_depth--;
- g_assert (patch_current_depth >= -1);
-# endif
- free (data);
-#endif
-}
-
-#if defined(__native_client_codegen__) && defined(__native_client__)
-void *
-nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data)
-{
- return g_hash_table_lookup (cman->hash, data);
}
-#endif
/**
* mono_code_manager_size:
*used_size = used;
return size;
}
-
-#ifdef __native_client_codegen__
-# if defined(TARGET_ARM)
-/* Fill empty space with UDF instruction used as halt on ARM. */
-void
-mono_nacl_fill_code_buffer (guint8 *data, int size)
-{
- guint32* data32 = (guint32*)data;
- int i;
- g_assert(size % 4 == 0);
- for (i = 0; i < size / 4; i++)
- data32[i] = 0xE7FEDEFF;
-}
-# elif (defined(TARGET_X86) || defined(TARGET_AMD64))
-/* Fill empty space with HLT instruction */
-void
-mono_nacl_fill_code_buffer(guint8 *data, int size)
-{
- memset (data, 0xf4, size);
-}
-# else
-# error "Not ported"
-# endif
-#endif
typedef int (*MonoCodeManagerFunc) (void *data, int csize, int size, void *user_data);
void mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data);
-#if defined( __native_client_codegen__ ) && defined( __native_client__ )
-
-#ifdef __arm__
-#define kNaClBundleSize 16
-#else
-#define kNaClBundleSize 32
-#endif
-#define kNaClBundleMask (kNaClBundleSize-1)
-
-#ifndef USE_JUMP_TABLES
-extern __thread unsigned char **patch_source_base;
-extern __thread unsigned char **patch_dest_base;
-extern __thread int patch_current_depth;
-#endif
-
-int nacl_is_code_address (void *target);
-void* nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data);
-void nacl_allow_target_modification (int val);
-void* nacl_modify_patch_target (unsigned char *target);
-void* nacl_inverse_modify_patch_target (unsigned char *target);
-#endif /* __native_client__ */
-
#endif /* __MONO_CODEMAN_H__ */