2 * Emit memory access for the front-end.
7 #include <mono/utils/mono-compiler.h>
11 #include <mono/utils/mono-memory-model.h>
16 #define MAX_INLINE_COPIES 10
/*
 * mini_emit_memset:
 *
 *   Emit IR that fills SIZE bytes starting at [DESTREG + OFFSET] with VAL.
 * Fast path: when the region fits in one register-sized, sufficiently
 * aligned store, a single store-immediate is emitted. Slow path: VAL is
 * materialized into a register and stored in progressively smaller chunks.
 * NOTE(review): several interior lines (locals, loop headers, braces) are
 * not visible in this view; comments describe only the visible statements.
 */
19 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Fast path: whole region covered by one naturally aligned immediate store. */
28 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
31 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
34 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
37 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
/* 8-byte immediate stores exist only on 64-bit targets. */
39 #if SIZEOF_REGISTER == 8
41 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: put VAL in a fresh pseudo-register sized to the native word. */
47 val_reg = alloc_preg (cfg);
49 if (SIZEOF_REGISTER == 8)
50 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
52 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Under-aligned destination: fall back to byte-at-a-time stores. */
55 /* This could be optimized further if necessary */
57 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Aligned destination: use 8-byte stores only when the backend tolerates
 * unaligned access on a 64-bit target; then drain the tail with
 * 4-, 2- and finally 1-byte stores. */
64 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
66 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
71 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
78 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
83 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
88 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit an inline (unrolled) copy of SIZE bytes from [SRCREG + SOFFSET]
 * to [DESTREG + DOFFSET], using the widest load/store pairs the alignment
 * and backend allow, finishing the tail with narrower accesses.
 * NOTE(review): interior lines (locals, loop headers, braces) are elided
 * in this view; comments describe only the visible statements.
 */
95 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
102 /*FIXME arbitrary hack to avoid unbound code expansion.*/
/* Guard against pathological inline expansion of huge constant copies. */
103 g_assert (size < 10000);
/* Under-aligned copy: byte-at-a-time load/store pairs. */
106 /* This could be optimized further if necessary */
108 cur_reg = alloc_preg (cfg);
109 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks only when unaligned access is cheap on a 64-bit target. */
117 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
119 cur_reg = alloc_preg (cfg);
120 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
121 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks. */
129 cur_reg = alloc_preg (cfg);
130 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
131 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte chunks. */
137 cur_reg = alloc_preg (cfg);
138 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining single bytes. */
145 cur_reg = alloc_preg (cfg);
146 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mini_emit_memcpy_internal:
 *
 *   Copy SIZE bytes (or a runtime size given by SIZE_INS when non-NULL)
 * from SRC to DEST. Variable-size copies, and constant copies whose
 * chunk count exceeds MAX_INLINE_COPIES (unless intrinsics are enabled),
 * are routed through a call to the memcpy helper method; everything else
 * is unrolled inline via mini_emit_memcpy ().
 * NOTE(review): interior lines (iargs setup, braces) are elided in this view.
 */
155 mini_emit_memcpy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size_ins, int size, int align)
157 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
159 /* We can't do copies at a smaller granule than the provided alignment */
/* Helper-call path: runtime size, or too many inline chunks. */
160 if (size_ins || ((size / align > MAX_INLINE_COPIES) && !(cfg->opt & MONO_OPT_INTRINS))) {
/* Constant size: materialize it as the third helper argument. */
166 EMIT_NEW_ICONST (cfg, size_ins, size);
167 iargs [2] = size_ins;
168 mono_emit_method_call (cfg, mini_get_memcpy_method (), iargs, NULL);
/* Inline path: unrolled load/store sequence. */
170 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, size, align);
/*
 * mini_emit_memset_internal:
 *
 *   Set SIZE bytes (or a runtime size SIZE_INS) of DEST to VALUE (or a
 * runtime value VALUE_INS). Any runtime operand, a non-zero constant
 * value, or a constant size exceeding MAX_INLINE_COPIES chunks (unless
 * intrinsics are enabled) goes through the memset helper method; the
 * remaining case — constant zero fill of modest size — is unrolled
 * inline via mini_emit_memset ().
 * NOTE(review): interior lines (iargs setup, braces) are elided in this view.
 */
175 mini_emit_memset_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *value_ins, int value, MonoInst *size_ins, int size, int align)
177 /* FIXME: Optimize the case when dest is OP_LDADDR */
179 /* We can't do copies at a smaller granule than the provided alignment */
/* Helper-call path: runtime operands, non-zero fill, or too many chunks. */
180 if (value_ins || size_ins || value != 0 || ((size / align > MAX_INLINE_COPIES) && !(cfg->opt & MONO_OPT_INTRINS))) {
/* Constant fill value: materialize as second helper argument. */
185 EMIT_NEW_ICONST (cfg, value_ins, value);
186 iargs [1] = value_ins;
/* Constant size: materialize as third helper argument. */
189 EMIT_NEW_ICONST (cfg, size_ins, size);
190 iargs [2] = size_ins;
192 mono_emit_method_call (cfg, mini_get_memset_method (), iargs, NULL);
/* Inline path: VALUE is a compile-time constant (zero) here. */
194 mini_emit_memset (cfg, dest->dreg, 0, size, value, align);
/*
 * mini_emit_memcpy_const_size:
 *
 *   Convenience wrapper: copy a compile-time-constant SIZE bytes from
 * SRC to DEST (no runtime size instruction).
 */
199 mini_emit_memcpy_const_size (MonoCompile *cfg, MonoInst *dest, MonoInst *src, int size, int align)
201 mini_emit_memcpy_internal (cfg, dest, src, NULL, size, align);
/*
 * mini_emit_memset_const_size:
 *
 *   Convenience wrapper: fill a compile-time-constant SIZE bytes of DEST
 * with the constant VALUE (no runtime value/size instructions).
 */
205 mini_emit_memset_const_size (MonoCompile *cfg, MonoInst *dest, int value, int size, int align)
207 mini_emit_memset_internal (cfg, dest, NULL, value, NULL, size, align);
/*
 * mini_emit_memory_load:
 *
 *   Emit a typed load of TYPE from [SRC->dreg + OFFSET], propagating
 * INS_FLAG onto the load instruction. A volatile load is followed by an
 * acquire barrier per the ECMA-335 memory model.
 * NOTE(review): the function's return statement is elided in this view;
 * presumably the new load instruction is returned — confirm upstream.
 */
211 mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag)
215 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
216 ins->flags |= ins_flag;
218 if (ins_flag & MONO_INST_VOLATILE) {
219 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
220 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/*
 * mini_emit_memory_store:
 *
 *   Emit a typed store of VALUE into [DEST->dreg], propagating INS_FLAG.
 * A volatile store is preceded by a release barrier per ECMA-335, and a
 * reference store into the heap gets a GC write barrier call unless the
 * stored value is provably null or we are compiling the write-barrier
 * wrapper itself.
 */
228 mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoInst *value, int ins_flag)
232 if (ins_flag & MONO_INST_VOLATILE) {
233 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
234 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
236 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
238 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, type, dest->dreg, 0, value->dreg);
239 ins->flags |= ins_flag;
/* Reference stores need a write barrier so the GC sees the new pointer;
 * skip it for known-null values and inside the barrier wrapper itself. */
240 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
241 mini_type_is_reference (type) && !MONO_INS_IS_PCONST_NULL (value)) {
242 /* insert call to write barrier */
243 mini_emit_write_barrier (cfg, dest, value);
/*
 * mini_emit_memory_copy_bytes:
 *
 *   Implement the cpblk opcode: copy SIZE bytes from SRC to DEST.
 * Volatile copies are bracketed by full (SEQ) barriers on both sides —
 * see the FIXME below. Constant sizes with intrinsics enabled take the
 * unrolled const-size path; otherwise a generic (possibly helper-call)
 * copy is emitted. Pointer-size alignment is assumed for both operands.
 */
248 mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag)
250 int align = SIZEOF_VOID_P;
253 * FIXME: It's unclear whether we should be emitting both the acquire
254 * and release barriers for cpblk. It is technically both a load and
255 * store operation, so it seems like that's the sensible thing to do.
257 * FIXME: We emit full barriers on both sides of the operation for
258 * simplicity. We should have a separate atomic memcpy method instead.
/* Leading barrier for volatile cpblk (full barrier, see FIXME above). */
260 if (ins_flag & MONO_INST_VOLATILE) {
261 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
262 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Constant size + intrinsics: unroll inline. */
265 if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST)) {
266 mini_emit_memcpy_const_size (cfg, dest, src, size->inst_c0, align);
268 if (cfg->verbose_level > 3)
269 printf ("EMITING REGULAR COPY\n");
/* Runtime size: generic path with the size instruction. */
270 mini_emit_memcpy_internal (cfg, dest, src, size, 0, align);
/* Trailing barrier for volatile cpblk. */
273 if (ins_flag & MONO_INST_VOLATILE) {
274 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
275 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
280 mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag)
282 int align = SIZEOF_VOID_P;
284 if (ins_flag & MONO_INST_VOLATILE) {
285 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
286 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
289 //FIXME unrolled memset only supports zeroing
290 if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST) && (value->opcode == OP_ICONST) && (value->inst_c0 == 0)) {
291 mini_emit_memset_const_size (cfg, dest, value->inst_c0, size->inst_c0, align);
293 mini_emit_memset_internal (cfg, dest, value, 0, size, 0, align);