gc_spill_slot_liveness_def: len:0
gc_param_slot_liveness_def: len:0
+generic_class_init: src1:i len:32
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
+#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
+ MonoInst *ins;
+
+ /*
+ * For LLVM, this requires that the code in the generic trampoline obtain the vtable argument according to
+ * the normal calling convention of the platform.
+ */
+ MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
+ ins->sreg1 = vtable_arg->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+#else
if (COMPILE_LLVM (cfg))
call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
else
call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
cfg->uses_vtable_reg = TRUE;
+#endif
}
static void
amd64_pop_reg (code, AMD64_RDI);
break;
}
+ case OP_GENERIC_CLASS_INIT: {
+ /*
+ * Inline fast path for generic class initialization: test the
+ * 'initialized' bit of the MonoVTable whose address is in sreg1,
+ * and only call the init trampoline when the bit is still clear.
+ */
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ guint8 *jump;
+
+ /* Lazily compute the byte offset/bit mask of MonoVTable.initialized once; cached in statics. */
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ /* test byte [sreg1 + byte_offset], bitmask; NZ means already initialized. */
+ amd64_test_membase_imm_size (code, ins->sreg1, byte_offset, bitmask, 1);
+ /* Forward branch over the call; displacement (-1) is patched below once the target is known. */
+ jump = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
+
+ /* The trampoline receives the vtable in the first integer argument register. */
+ if (ins->sreg1 != MONO_AMD64_ARG_REG1)
+ amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, ins->sreg1, 8);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init", FALSE);
+ /* Record the call return address so GC maps cover this callsite. */
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
+
+ /* Patch the forward branch to land here (skip-call target). */
+ x86_patch (jump, code);
+ break;
+ }
+
case OP_X86_LEA:
amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
break;
#define MONO_ARCH_HAVE_DUMMY_INIT 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_PATCH_CODE_NEW 1
+#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#if defined(TARGET_OSX) || defined(__linux__)
#define MONO_ARCH_HAVE_TLS_GET_REG 1
typedef struct {
LLVMModuleRef module;
LLVMValueRef throw, rethrow, throw_corlib_exception;
+ LLVMValueRef generic_class_init_tramp;
GHashTable *llvm_types;
LLVMValueRef got_var;
const char *got_symbol;
CHECK_FAILURE (ctx);
break;
}
+ case OP_GENERIC_CLASS_INIT: {
+ /*
+ * LLVM version of the inline generic-class-init fast path: load the
+ * byte of the MonoVTable (lhs) containing the 'initialized' bitfield,
+ * and branch around the trampoline call when the bit is already set,
+ * mirroring the amd64 test/branch sequence.
+ */
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ LLVMValueRef flags_load, cmp;
+ MonoMethodSignature *sig;
+ const char *icall_name;
+ LLVMValueRef callee;
+ LLVMBasicBlockRef init_bb, noinit_bb;
+
+ /* Lazily compute the byte offset/bit mask of MonoVTable.initialized once; cached in statics. */
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ flags_load = emit_load (ctx, bb, &builder, 1, convert (ctx, lhs, LLVMPointerType (LLVMInt8Type(), 0)), "", FALSE);
+ set_metadata_flag (flags_load, "mono.nofail.load");
+ /*
+ * (flags & bitmask) == bitmask <=> the initialized bit is set.
+ * Both icmp operands must be i8: comparing the masked byte against an
+ * i1 constant 1 would be ill-typed LLVM IR, and '== 1' would also be
+ * wrong for any bitmask other than 0x1.
+ */
+ cmp = LLVMBuildICmp (builder, LLVMIntEQ, LLVMBuildAnd (builder, flags_load, LLVMConstInt (LLVMInt8Type (), bitmask, 0), ""), LLVMConstInt (LLVMInt8Type (), bitmask, 0), "");
+
+ /* Resolve (and cache on the module) the trampoline callee on first use. */
+ callee = ctx->lmodule->generic_class_init_tramp;
+ if (!callee) {
+ icall_name = "specific_trampoline_generic_class_init";
+ /* void (gpointer vtable); int64 param matches the 64-bit target word — TODO confirm for 32-bit LLVM targets. */
+ sig = mono_metadata_signature_alloc (mono_get_corlib (), 1);
+ sig->ret = &mono_get_void_class ()->byval_arg;
+ sig->params [0] = &mono_get_int64_class ()->byval_arg;
+ if (cfg->compile_aot) {
+ callee = get_plt_entry (ctx, sig_to_llvm_sig (ctx, sig), MONO_PATCH_INFO_INTERNAL_METHOD, icall_name);
+ } else {
+ callee = LLVMAddFunction (module, icall_name, sig_to_llvm_sig (ctx, sig));
+ LLVMAddGlobalMapping (ctx->lmodule->ee, callee, resolve_patch (cfg, MONO_PATCH_INFO_INTERNAL_METHOD, icall_name));
+ }
+ /* Publish the fully-constructed callee before other threads can read the cache. */
+ mono_memory_barrier ();
+ ctx->lmodule->generic_class_init_tramp = callee;
+ }
+
+ init_bb = gen_bb (ctx, "INIT_BB");
+ noinit_bb = gen_bb (ctx, "NOINIT_BB");
+
+ /* Bit set -> skip straight to noinit_bb; otherwise run the init call. */
+ LLVMBuildCondBr (ctx->builder, cmp, noinit_bb, init_bb);
+
+ builder = create_builder (ctx);
+ ctx->builder = builder;
+ LLVMPositionBuilderAtEnd (builder, init_bb);
+ emit_call (ctx, bb, &builder, callee, &lhs, 1);
+ LLVMBuildBr (builder, noinit_bb);
+
+ /* Continue code generation in the join block. */
+ builder = create_builder (ctx);
+ ctx->builder = builder;
+ LLVMPositionBuilderAtEnd (builder, noinit_bb);
+ break;
+ }
case OP_AOTCONST: {
guint32 got_offset;
LLVMValueRef indexes [2];
*/
MINI_OP(OP_GC_PARAM_SLOT_LIVENESS_DEF, "gc_param_slot_liveness_def", NONE, NONE, NONE)
+/*
+ * Check whether the class whose vtable is given by sreg1 was inited;
+ * if not, call mono_generic_class_init_trampoline () through a
+ * trampoline. Since the trampoline saves all registers, this doesn't
+ * clobber any registers.
+ */
+MINI_OP(OP_GENERIC_CLASS_INIT, "generic_class_init", NONE, IREG, NONE)
+
/* Arch specific opcodes */
/* #if defined(__native_client_codegen__) || defined(__native_client__) */
/* We have to define these in terms of the TARGET defines, not NaCl defines */
register_icall (mono_object_isinst_with_cache, "mono_object_isinst_with_cache", "object object ptr ptr", FALSE);
register_icall (mono_debugger_agent_user_break, "mono_debugger_agent_user_break", "void", FALSE);
+ register_icall (mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL),
+ "specific_trampoline_generic_class_init", "void", TRUE);
#ifdef TARGET_IOS
register_icall (pthread_getspecific, "pthread_getspecific", "ptr ptr", TRUE);
gpointer
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
- guint8 *tramp;
guint8 *code, *buf;
- static int byte_offset = -1;
- static guint8 bitmask;
- guint8 *jump;
int tramp_size;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- tramp_size = 64;
+ tramp_size = 16;
code = buf = mono_global_codeman_reserve (tramp_size);
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
- jump = code;
- amd64_branch8 (code, X86_CC_Z, -1, 1);
-
- amd64_ret (code);
-
- x86_patch (jump, code);
-
- if (aot) {
- code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
- amd64_jump_reg (code, AMD64_R11);
- } else {
- tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);
-
- /* jump to the actual trampoline */
- amd64_jump_code (code, tramp);
- }
+ /* Not used on amd64 */
+ amd64_breakpoint (code);
nacl_global_codeman_validate (&buf, tramp_size, &code);