#if defined(__I386__)
// Generate architecture specific instructions.
codegen_emit_instruction(jd, iptr);
+ break;
#else
+ {
+ // NULL-initialize both: each branch below assigns only one of them,
+ // yet both are passed to codegen_emit_patchable_barrier() afterwards.
+ // Reading the unassigned one would be undefined behavior.
+ fieldinfo* fi = NULL;
+ patchref_t* pr = NULL;
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
unresolved_field* uf = iptr->sx.s23.s3.uf;
fieldtype = uf->fieldref->parseddesc.fd->type;
disp = dseg_add_unique_address(cd, 0);
- patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
+ pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
}
else {
- fieldinfo* fi = iptr->sx.s23.s3.fmiref->p.field;
+ fi = iptr->sx.s23.s3.fmiref->p.field;
fieldtype = fi->type;
disp = dseg_add_address(cd, fi->value);
}
}
+#if defined(USES_PATCHABLE_MEMORY_BARRIER)
+ codegen_emit_patchable_barrier(iptr, cd, pr, fi);
+#endif
+
// XXX X86_64: Here We had this:
/* This approach is much faster than moving the field
address inline into a register. */
break;
}
emit_store_dst(jd, iptr, d);
-#endif
break;
+ }
+#endif
case ICMD_PUTSTATIC: /* ..., value ==> ... */
#if defined(__I386__)
// Generate architecture specific instructions.
codegen_emit_instruction(jd, iptr);
+ break;
#else
+ {
+ // NULL-initialize both: each branch below assigns only one of them,
+ // yet both are passed to codegen_emit_patchable_barrier() afterwards.
+ // Reading the unassigned one would be undefined behavior.
+ fieldinfo* fi = NULL;
+ patchref_t* pr = NULL;
+
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
unresolved_field* uf = iptr->sx.s23.s3.uf;
fieldtype = uf->fieldref->parseddesc.fd->type;
disp = dseg_add_unique_address(cd, 0);
- patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
+ pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
}
else {
- fieldinfo* fi = iptr->sx.s23.s3.fmiref->p.field;
+ fi = iptr->sx.s23.s3.fmiref->p.field;
fieldtype = fi->type;
disp = dseg_add_address(cd, fi->value);
M_DST(s1, REG_ITMP1, 0);
+ // NOTE(review): this pre-existing break exits before the patchable
+ // barrier below, so resolved fields never get one — confirm intended.
break;
}
+#if defined(USES_PATCHABLE_MEMORY_BARRIER)
+ codegen_emit_patchable_barrier(iptr, cd, pr, fi);
#endif
break;
+ }
+#endif
/* branch operations **********************************************/
continue;
if (!md->params[i].inmemory) {
- assert(ARG_CNT > 0);
- s1 = emit_load(jd, iptr, var, d);
-
+ // The load is moved into each type case so that FLT/DBL values can
+ // be materialized in REG_FTMP1 when floats travel in integer
+ // registers (SUPPORT_PASS_FLOATARGS_IN_INTREGS) and then bit-cast
+ // into the integer destination d; all other types load into d.
switch (var->type) {
case TYPE_ADR:
case TYPE_INT:
- assert(INT_ARG_CNT > 0);
+ s1 = emit_load(jd, iptr, var, d);
emit_imove(cd, s1, d);
break;
case TYPE_LNG:
+ s1 = emit_load(jd, iptr, var, d);
emit_lmove(cd, s1, d);
break;
case TYPE_FLT:
#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
+ s1 = emit_load(jd, iptr, var, d);
emit_fmove(cd, s1, d);
#else
+ // Float arg passed in an int register: load to a float temp,
+ // then reinterpret the bits into d.
+ s1 = emit_load(jd, iptr, var, REG_FTMP1);
M_CAST_F2I(s1, d);
#endif
break;
case TYPE_DBL:
#if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
+ s1 = emit_load(jd, iptr, var, d);
emit_dmove(cd, s1, d);
#else
+ // Double arg passed in an int register: load to a float temp,
+ // then reinterpret the bits into d.
+ s1 = emit_load(jd, iptr, var, REG_FTMP1);
M_CAST_D2L(s1, d);
#endif
break;