typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
+ /* Number of 8-byte (mgreg_t) stack slots needed for stack-passed arguments,
+ * rounded up to an even count so the param area stays 16-byte aligned.
+ * Computed once when the call info is prepared, from each ArgOnStack
+ * argument's offset + arg_size / 8. */
+ int nstack_args;
} ArchDynCallInfo;
static gboolean
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgValuetypeInReg:
- break;
case ArgOnStack:
- if (!(ainfo->offset + (ainfo->arg_size / 8) <= DYN_CALL_STACK_ARGS))
- return FALSE;
break;
default:
return FALSE;
{
ArchDynCallInfo *info;
CallInfo *cinfo;
+ int i;
cinfo = get_call_info (NULL, sig);
// FIXME: Preprocess the info to speed up get_dyn_call_args ().
info->sig = sig;
info->cinfo = cinfo;
+ info->nstack_args = 0;
+
+ for (i = 0; i < cinfo->nargs; ++i) {
+ ArgInfo *ainfo = &cinfo->args [i];
+ switch (ainfo->storage) {
+ case ArgOnStack:
+ info->nstack_args = MAX (info->nstack_args, ainfo->offset + (ainfo->arg_size / 8));
+ break;
+ default:
+ break;
+ }
+ }
+ /* Align to 16 bytes */
+ if (info->nstack_args & 1)
+ info->nstack_args ++;
return (MonoDynCallInfo*)info;
}
g_free (ainfo);
}
+/*
+ * mono_arch_dyn_call_get_buf_size:
+ *
+ *   Return the size in bytes of the buffer the caller must allocate and pass
+ * to mono_arch_start_dyn_call (): the fixed DynCallArgs header plus one
+ * mgreg_t slot for every stack-passed argument (nstack_args, precomputed
+ * when the dyn call info was prepared).
+ */
+int
+mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
+{
+ ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
+
+ /* Extend the 'regs' field dynamically */
+ return sizeof (DynCallArgs) + (ainfo->nstack_args * sizeof (mgreg_t));
+}
+
#define PTR_TO_GREG(ptr) (mgreg_t)(ptr)
#define GREG_TO_PTR(greg) (gpointer)(greg)
* libffi.
*/
void
-mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
+mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
param_reg_to_index_inited = 1;
}
- g_assert (buf_len >= sizeof (DynCallArgs));
-
p->res = 0;
p->ret = ret;
+ p->nstack_args = dinfo->nstack_args;
arg_index = 0;
greg = 0;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_DYN_CALL: {
- int i;
+ int i, limit_reg, index_reg, src_reg, dst_reg;
MonoInst *var = cfg->dyn_call_var;
guint8 *label;
+ guint8 *buf [16];
g_assert (var->opcode == OP_REGOFFSET);
amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + (i * sizeof (double)));
amd64_patch (label, code);
+ /* Allocate param area */
+ /* This doesn't need to be freed since OP_DYN_CALL is never called in a loop */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8);
+ amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3);
+ amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX);
/* Set stack args */
- for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + i) * sizeof(mgreg_t)), sizeof(mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, i * sizeof (mgreg_t), AMD64_RAX, sizeof (mgreg_t));
- }
+ /* rax/rcx/rdx/r8/r9 is scratch */
+ limit_reg = AMD64_RAX;
+ index_reg = AMD64_RCX;
+ src_reg = AMD64_R8;
+ dst_reg = AMD64_R9;
+ amd64_mov_reg_membase (code, limit_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8);
+ amd64_mov_reg_imm (code, index_reg, 0);
+ amd64_lea_membase (code, src_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS) * sizeof(mgreg_t)));
+ amd64_mov_reg_reg (code, dst_reg, AMD64_RSP, 8);
+ buf [0] = code;
+ x86_jump8 (code, 0);
+ buf [1] = code;
+ amd64_mov_reg_membase (code, AMD64_RDX, src_reg, 0, 8);
+ amd64_mov_membase_reg (code, dst_reg, 0, AMD64_RDX, 8);
+ amd64_alu_reg_imm (code, X86_ADD, index_reg, 1);
+ amd64_alu_reg_imm (code, X86_ADD, src_reg, 8);
+ amd64_alu_reg_imm (code, X86_ADD, dst_reg, 8);
+ amd64_patch (buf [0], code);
+ amd64_alu_reg_reg (code, X86_CMP, index_reg, limit_reg);
+ buf [2] = code;
+ x86_branch8 (code, X86_CC_LT, 0, FALSE);
+ amd64_patch (buf [2], buf [1]);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
- amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));
+ amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
/* Make the call */
amd64_call_reg (code, AMD64_R10);