#include "jit-icalls.h"
#define MAX_INLINE_COPIES 10
+#define MAX_INLINE_COPY_SIZE 10000
void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
int val_reg;
/*FIXME arbitrary hack to avoid unbounded code expansion.*/
- g_assert (size < 10000);
+ g_assert (size < MAX_INLINE_COPY_SIZE);
g_assert (val == 0);
g_assert (align > 0);
int cur_reg;
/*FIXME arbitrary hack to avoid unbounded code expansion.*/
- g_assert (size < 10000);
+ g_assert (size < MAX_INLINE_COPY_SIZE);
g_assert (align > 0);
if (align < SIZEOF_VOID_P) {
mini_emit_memset_internal (cfg, dest, NULL, value, NULL, size, align);
}
+
+/*
+ * Recursively walk the instance fields of @klass and, for every field that
+ * holds a managed reference, set the corresponding bit in *wb_bitmap.
+ * Bit N marks the Nth pointer-sized slot counted from @offset; the caller
+ * uses the bitmap to decide which slots of an inlined copy need a GC write
+ * barrier.  NOTE(review): assumes the copied object fits within the bit
+ * width of `unsigned` — callers must bound the size; confirm at call sites.
+ */
+static void
+create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
+{
+	MonoClassField *field;
+	gpointer iter = NULL;
+
+	while ((field = mono_class_get_fields (klass, &iter))) {
+		int foffset;
+
+		/* Static fields are not part of the instance layout being copied. */
+		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
+			continue;
+		/* Valuetype field offsets include the boxed MonoObject header; strip it. */
+		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
+		if (mini_type_is_reference (mono_field_get_type (field))) {
+			/* Managed references must be naturally aligned (asserted here). */
+			g_assert ((foffset % SIZEOF_VOID_P) == 0);
+			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
+		} else {
+			/* Recurse into embedded valuetypes that themselves contain references. */
+			MonoClass *field_class = mono_class_from_mono_type (field->type);
+			if (field_class->has_references)
+				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
+		}
+	}
+}
+
+/*
+ * Emit an inlined, write-barrier-aware copy of a value of type @klass from
+ * the address in iargs [1] to the address in iargs [0].  Pointer-sized slots
+ * that hold managed references (per create_write_barrier_bitmap) get a write
+ * barrier after the store; the remaining tail is copied with plain
+ * 4/2/1-byte moves.  Returns TRUE on success, FALSE when the copy cannot be
+ * inlined this way (alignment below sizeof(void*), or size above five
+ * pointer words) and the caller must fall back to a generic copy.
+ */
+static gboolean
+mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
+{
+	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
+	unsigned need_wb = 0;
+
+	if (align == 0)
+		align = 4;
+
+	/* Types with references can't have alignment smaller than sizeof(void*). */
+	if (align < SIZEOF_VOID_P)
+		return FALSE;
+
+	/* Keep the inline expansion small; also bounds the need_wb bitmap. */
+	if (size > 5 * SIZEOF_VOID_P)
+		return FALSE;
+
+	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
+
+	destreg = iargs [0]->dreg;
+	srcreg = iargs [1]->dreg;
+	offset = 0;
+
+	dest_ptr_reg = alloc_preg (cfg);
+	tmp_reg = alloc_preg (cfg);
+
+	/*tmp = dreg*/
+	/* dest_ptr_reg walks the destination; iargs [0] is reused below as the
+	 * barrier's destination operand, so keep it pointing at the current slot. */
+	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
+
+	/* Copy pointer-sized words, emitting a barrier for each reference slot. */
+	while (size >= SIZEOF_VOID_P) {
+		MonoInst *load_inst;
+		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
+		load_inst->dreg = tmp_reg;
+		load_inst->inst_basereg = srcreg;
+		load_inst->inst_offset = offset;
+		MONO_ADD_INS (cfg->cbb, load_inst);
+
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
+
+		/* Low bit of need_wb corresponds to the word just stored. */
+		if (need_wb & 0x1)
+			mini_emit_write_barrier (cfg, iargs [0], load_inst);
+
+		offset += SIZEOF_VOID_P;
+		size -= SIZEOF_VOID_P;
+		need_wb >>= 1;
+
+		/*tmp += sizeof (void*)*/
+		if (size >= SIZEOF_VOID_P) {
+			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
+			MONO_ADD_INS (cfg->cbb, iargs [0]);
+		}
+	}
+
+	/* Those cannot be references since size < sizeof (void*) */
+	while (size >= 4) {
+		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
+		offset += 4;
+		size -= 4;
+	}
+
+	while (size >= 2) {
+		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
+		offset += 2;
+		size -= 2;
+	}
+
+	while (size >= 1) {
+		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
+		offset += 1;
+		size -= 1;
+	}
+
+	return TRUE;
+}
+
static void
-mini_emit_memory_copy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
+mini_emit_memory_copy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, int explicit_align, gboolean native)
{
MonoInst *iargs [4];
int size;
MonoInst *memcpy_ins = NULL;
g_assert (klass);
- g_assert (!(native && klass->has_references));
+	/*
+	 * Fun fact about @native: it is NOT true that @klass has no references when @native is true.
+	 * This happens in pinvoke2: marshal.c uses CEE_MONO_LDOBJNATIVE and passes the managed klass.
+	 * The actual data being copied has no references, but @klass might.
+	 * This means we cannot assert !(klass->has_references && native).
+	 */
if (cfg->gshared)
klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
if (!align)
align = SIZEOF_VOID_P;
+ if (explicit_align)
+ align = explicit_align;
- if (mini_type_is_reference (&klass->byval_arg)) {
+ if (mini_type_is_reference (&klass->byval_arg)) { // Refs *MUST* be naturally aligned
MonoInst *store, *load;
int dreg = alloc_ireg_ref (cfg);
{
MonoInst *ins;
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
+ if (ins_flag & MONO_INST_UNALIGNED) {
+ MonoInst *addr, *tmp_var;
+ int align;
+ int size = mono_type_size (type, &align);
+
+ if (offset) {
+ MonoInst *add_offset;
+ NEW_BIALU_IMM (cfg, add_offset, OP_PADD_IMM, alloc_preg (cfg), src->dreg, offset);
+ MONO_ADD_INS (cfg->cbb, add_offset);
+ src = add_offset;
+ }
+
+ tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
+ EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);
+
+ mini_emit_memcpy_const_size (cfg, addr, src, size, 1);
+ EMIT_NEW_TEMPLOAD (cfg, ins, tmp_var->inst_c0);
+ } else {
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
+ }
ins->flags |= ins_flag;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
+
+ if (ins_flag & MONO_INST_UNALIGNED) {
+ MonoInst *addr, *mov, *tmp_var;
+
+ tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
+ EMIT_NEW_TEMPSTORE (cfg, mov, tmp_var->inst_c0, value);
+ EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);
+ mini_emit_memory_copy_internal (cfg, dest, addr, mono_class_from_mono_type (type), 1, FALSE);
+ }
+
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, type, dest->dreg, 0, value->dreg);
void
mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag)
{
- int align = SIZEOF_VOID_P;
+ int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : SIZEOF_VOID_P;
/*
* FIXME: It's unclear whether we should be emitting both the acquire
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
}
- if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST) && size->inst_c0 < 10000) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST)) {
mini_emit_memcpy_const_size (cfg, dest, src, size->inst_c0, align);
} else {
- if (cfg->verbose_level > 3)
- printf ("EMITING REGULAR COPY\n");
mini_emit_memcpy_internal (cfg, dest, src, size, 0, align);
}
void
mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag)
{
- int align = SIZEOF_VOID_P;
+ int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : SIZEOF_VOID_P;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
}
-/* If @klass is a VT it copies it's value, if it's a ref type, it copies the pointer itself. */
+/*
+ * If @klass is a valuetype, emit code to copy a value with source address in @src and destination address in @dest.
+ * If @klass is a ref type, copy a pointer instead.
+ */
+
void
mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag)
{
+ int explicit_align = 0;
+ if (ins_flag & MONO_INST_UNALIGNED)
+ explicit_align = 1;
+
/*
* FIXME: It's unclear whether we should be emitting both the acquire
* and release barriers for cpblk. It is technically both a load and
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
}
- mini_emit_memory_copy_internal (cfg, dest, src, klass, native);
+ mini_emit_memory_copy_internal (cfg, dest, src, klass, explicit_align, native);
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */