/* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
- Copyright (C) 1996-2005, 2006, 2007, 2008, 2009
+ Copyright (C) 1996-2011
CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
Copyright (C) 2009 Theobroma Systems Ltd.
return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
}
+/**
+ * Fix up register locations in the case where control is transferred to an
+ * exception handler block via normal control flow (no exception).
+ *
+ * Called for branches/fall-throughs whose target block has type BBTYPE_EXH:
+ * the handler expects the exception pointer in REG_ITMP1_XPTR, so the single
+ * interface in-slot of the handler block is loaded and copied there.
+ *
+ * @param jd   JIT data of the method currently being compiled.
+ * @param bptr Target basic block of type BBTYPE_EXH (the exception handler).
+ */
+static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
+{
+ // Exception handlers have exactly 1 in-slot
+ assert(bptr->indepth == 1);
+ varinfo *var = VAR(bptr->invars[0]);
+ int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
+ emit_load(jd, NULL, var, d);
+ // Copy the interface variable to ITMP1 (XPTR) because that's where
+ // the handler expects it.
+ emit_imove(jd->cd, d, REG_ITMP1_XPTR);
+}
/**
* Generates machine code.
bool codegen_emit(jitdata *jd)
{
varinfo* var;
- builtintable_entry* bte;
+ builtintable_entry* bte = 0;
methoddesc* md;
int32_t s1, s2, /*s3,*/ d;
int32_t fieldtype;
case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
/* sx.val.i = constant */
+ case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
+ /* sx.val.i = constant */
case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
/* sx.val.l = constant */
+ case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
+ /* sx.val.l = constant */
case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
#if defined(__I386__)
// Generate architecture specific instructions.
codegen_emit_instruction(jd, iptr);
+ break;
#else
+ {
+ fieldinfo* fi;
+ patchref_t* pr;
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
unresolved_field* uf = iptr->sx.s23.s3.uf;
fieldtype = uf->fieldref->parseddesc.fd->type;
disp = dseg_add_unique_address(cd, 0);
- patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
+ pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
+
+ fi = NULL; /* Silence compiler warning */
}
else {
- fieldinfo* fi = iptr->sx.s23.s3.fmiref->p.field;
+ fi = iptr->sx.s23.s3.fmiref->p.field;
fieldtype = fi->type;
disp = dseg_add_address(cd, fi->value);
patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
PROFILE_CYCLE_START;
}
+
+ pr = NULL; /* Silence compiler warning */
}
+#if defined(USES_PATCHABLE_MEMORY_BARRIER)
+ codegen_emit_patchable_barrier(iptr, cd, pr, fi);
+#endif
+
// XXX X86_64: Here We had this:
/* This approach is much faster than moving the field
address inline into a register. */
- // XXX ARM: M_DSEG_LOAD(REG_ITMP3, disp);
M_ALD_DSEG(REG_ITMP1, disp);
switch (fieldtype) {
M_ALD(d, REG_ITMP1, 0);
break;
case TYPE_INT:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_FLT:
+#endif
d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
M_ILD(d, REG_ITMP1, 0);
break;
case TYPE_LNG:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_DBL:
+#endif
d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
M_LLD(d, REG_ITMP1, 0);
break;
+#if !defined(ENABLE_SOFTFLOAT)
case TYPE_FLT:
d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
M_FLD(d, REG_ITMP1, 0);
d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
M_DLD(d, REG_ITMP1, 0);
break;
+#endif
+ default:
+ // Silence compiler warning.
+ d = 0;
}
emit_store_dst(jd, iptr, d);
-#endif
break;
+ }
+#endif
case ICMD_PUTSTATIC: /* ..., value ==> ... */
#if defined(__I386__)
// Generate architecture specific instructions.
codegen_emit_instruction(jd, iptr);
+ break;
#else
+ {
+ fieldinfo* fi;
+ patchref_t* pr;
+
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
unresolved_field* uf = iptr->sx.s23.s3.uf;
fieldtype = uf->fieldref->parseddesc.fd->type;
disp = dseg_add_unique_address(cd, 0);
- patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
+ pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
+
+ fi = NULL; /* Silence compiler warning */
}
else {
- fieldinfo* fi = iptr->sx.s23.s3.fmiref->p.field;
+ fi = iptr->sx.s23.s3.fmiref->p.field;
fieldtype = fi->type;
disp = dseg_add_address(cd, fi->value);
patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
PROFILE_CYCLE_START;
}
+
+ pr = NULL; /* Silence compiler warning */
}
// XXX X86_64: Here We had this:
/* This approach is much faster than moving the field
address inline into a register. */
- // XXX ARM: M_DSEG_LOAD(REG_ITMP3, disp);
M_ALD_DSEG(REG_ITMP1, disp);
switch (fieldtype) {
M_AST(s1, REG_ITMP1, 0);
break;
case TYPE_INT:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_FLT:
+#endif
s1 = emit_load_s1(jd, iptr, REG_ITMP2);
M_IST(s1, REG_ITMP1, 0);
break;
case TYPE_LNG:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_DBL:
+#endif
s1 = emit_load_s1(jd, iptr, REG_LTMP23);
M_LST(s1, REG_ITMP1, 0);
break;
+#if !defined(ENABLE_SOFTFLOAT)
case TYPE_FLT:
s1 = emit_load_s1(jd, iptr, REG_FTMP2);
M_FST(s1, REG_ITMP1, 0);
s1 = emit_load_s1(jd, iptr, REG_FTMP2);
M_DST(s1, REG_ITMP1, 0);
break;
+#endif
}
+#if defined(USES_PATCHABLE_MEMORY_BARRIER)
+ codegen_emit_patchable_barrier(iptr, cd, pr, fi);
#endif
break;
+ }
+#endif
/* branch operations **********************************************/
codegen_emit_phi_moves(jd, bptr);
}
#endif
+ if (iptr->dst.block->type == BBTYPE_EXH)
+ fixup_exc_handler_interface(jd, iptr->dst.block);
emit_br(cd, iptr->dst.block);
ALIGNCODENOP;
break;
case ICMD_JSR: /* ... ==> ... */
+ assert(iptr->sx.s23.s3.jsrtarget.block->type != BBTYPE_EXH);
emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
ALIGNCODENOP;
break;
case ICMD_IFNULL: /* ..., value ==> ... */
case ICMD_IFNONNULL:
+ assert(iptr->dst.block->type != BBTYPE_EXH);
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
#if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
// register directly. Reason is, that register content is
// not 32-bit clean. Fix this!
+ assert(iptr->dst.block->type != BBTYPE_EXH);
+
#if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
if (iptr->sx.val.i == 0) {
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
case ICMD_IF_LGT:
case ICMD_IF_LLE:
+ assert(iptr->dst.block->type != BBTYPE_EXH);
+
// Generate architecture specific instructions.
codegen_emit_instruction(jd, iptr);
break;
case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
+ assert(iptr->dst.block->type != BBTYPE_EXH);
+
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
#if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
case ICMD_IF_ICMPNE: /* op1 = target JavaVM pc */
+ assert(iptr->dst.block->type != BBTYPE_EXH);
+
#if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
case ICMD_IF_ICMPLE:
case ICMD_IF_ICMPGE:
+ assert(iptr->dst.block->type != BBTYPE_EXH);
+
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
#if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
case ICMD_IF_LCMPLE:
case ICMD_IF_LCMPGE:
+ assert(iptr->dst.block->type != BBTYPE_EXH);
+
// Generate architecture specific instructions.
codegen_emit_instruction(jd, iptr);
break;
goto nowperformreturn;
case ICMD_IRETURN: /* ..., retvalue ==> ... */
+#if defined(ENABLE_SOFTFLOAT)
+ case ICMD_FRETURN:
+#endif
REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_RESULT);
goto nowperformreturn;
case ICMD_LRETURN: /* ..., retvalue ==> ... */
+#if defined(ENABLE_SOFTFLOAT)
+ case ICMD_DRETURN:
+#endif
REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_LRESULT);
emit_lmove(cd, s1, REG_LRESULT);
goto nowperformreturn;
+#if !defined(ENABLE_SOFTFLOAT)
case ICMD_FRETURN: /* ..., retvalue ==> ... */
REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_FRESULT);
- // XXX ARM: Here this was M_CAST_F2I(s1, REG_RESULT);
+#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
emit_fmove(cd, s1, REG_FRESULT);
+#else
+ M_CAST_F2I(s1, REG_RESULT);
+#endif
goto nowperformreturn;
case ICMD_DRETURN: /* ..., retvalue ==> ... */
REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_FRESULT);
- // XXX ARM: Here this was M_CAST_D2L(s1, REG_RESULT_PACKED);
+#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
emit_dmove(cd, s1, REG_FRESULT);
+#else
+ M_CAST_D2L(s1, REG_LRESULT);
+#endif
goto nowperformreturn;
+#endif
nowperformreturn:
#if !defined(NDEBUG)
continue;
if (!md->params[i].inmemory) {
- assert(ARG_CNT > 0);
- s1 = emit_load(jd, iptr, var, d);
-
switch (var->type) {
case TYPE_ADR:
case TYPE_INT:
- assert(INT_ARG_CNT > 0);
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_FLT:
+#endif
+ s1 = emit_load(jd, iptr, var, d);
emit_imove(cd, s1, d);
break;
-#if 0 //XXX For ARM:
-if (!md->params[s3].inmemory) {
- s1 = emit_load(jd, iptr, var, REG_FTMP1);
- if (IS_2_WORD_TYPE(var->type))
- M_CAST_D2L(s1, d);
- else
- M_CAST_F2I(s1, d);
-}
-#endif //XXX End of ARM!
-
case TYPE_LNG:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_DBL:
+#endif
+ s1 = emit_load(jd, iptr, var, d);
emit_lmove(cd, s1, d);
break;
+#if !defined(ENABLE_SOFTFLOAT)
case TYPE_FLT:
+#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
+ s1 = emit_load(jd, iptr, var, d);
emit_fmove(cd, s1, d);
+#else
+ s1 = emit_load(jd, iptr, var, REG_FTMP1);
+ M_CAST_F2I(s1, d);
+#endif
break;
case TYPE_DBL:
+#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
+ s1 = emit_load(jd, iptr, var, d);
emit_dmove(cd, s1, d);
+#else
+ s1 = emit_load(jd, iptr, var, REG_FTMP1);
+ M_CAST_D2L(s1, d);
+#endif
break;
+#endif
}
}
else {
break;
case TYPE_INT:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_FLT:
+#endif
#if SIZEOF_VOID_P == 4
s1 = emit_load(jd, iptr, var, REG_ITMP1);
M_IST(s1, REG_SP, d);
#endif
case TYPE_LNG:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_DBL:
+#endif
s1 = emit_load(jd, iptr, var, REG_LTMP12);
// XXX Sparc64: Here this actually was:
// M_STX(s1, REG_SP, JITSTACK + d);
M_LST(s1, REG_SP, d);
break;
+#if !defined(ENABLE_SOFTFLOAT)
case TYPE_FLT:
#if SIZEOF_VOID_P == 4
s1 = emit_load(jd, iptr, var, REG_FTMP1);
// M_DST(s1, REG_SP, JITSTACK + d);
M_DST(s1, REG_SP, d);
break;
+#endif
}
}
}
switch (md->returntype.type) {
case TYPE_INT:
case TYPE_ADR:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_FLT:
+#endif
s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
// XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
emit_imove(cd, REG_RESULT, s1);
break;
case TYPE_LNG:
+#if defined(ENABLE_SOFTFLOAT)
+ case TYPE_DBL:
+#endif
s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
// XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
emit_lmove(cd, REG_LRESULT, s1);
emit_store_dst(jd, iptr, s1);
break;
-#if 0 //XXX For ARM!!!
#if !defined(ENABLE_SOFTFLOAT)
- } else {
- s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
- if (IS_2_WORD_TYPE(d))
- M_CAST_L2D(REG_RESULT_PACKED, s1);
- else
- M_CAST_I2F(REG_RESULT, s1);
- }
-#endif /* !defined(ENABLE_SOFTFLOAT) */
-#endif //XXX End of ARM
-
case TYPE_FLT:
+#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
emit_fmove(cd, REG_FRESULT, s1);
+#else
+ s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_CAST_I2F(REG_RESULT, s1);
+#endif
emit_store_dst(jd, iptr, s1);
break;
case TYPE_DBL:
+#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
emit_dmove(cd, REG_FRESULT, s1);
+#else
+ s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_CAST_L2D(REG_LRESULT, s1);
+#endif
emit_store_dst(jd, iptr, s1);
break;
+#endif
case TYPE_VOID:
break;
}
#endif
+ if (bptr->next && bptr->next->type == BBTYPE_EXH)
+ fixup_exc_handler_interface(jd, bptr->next);
+
} // for all basic blocks
// Generate traps.