#include <unistd.h>
#ifndef __linux__
-#include <sys/systeminfo.h>
#include <thread.h>
#endif
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>
+#include <mono/utils/mono-hwcap-sparc.h>
#include "mini-sparc.h"
#include "trace.h"
#endif
-/* Whenever the CPU supports v9 instructions */
-static gboolean sparcv9 = FALSE;
-
/* Whenever this is a 64bit executable */
#if SPARCV9
static gboolean v64 = TRUE;
void
mono_arch_cpu_init (void)
{
- guint32 dummy;
- /* make sure sparcv9 is initialized for embedded use */
- mono_arch_cpu_optimizazions(&dummy);
}
/*
* This function returns the optimizations supported on this cpu.
*/
guint32
-mono_arch_cpu_optimizazions (guint32 *exclude_mask)
+mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
- char buf [1024];
guint32 opts = 0;
*exclude_mask = 0;
-#ifndef __linux__
- if (!sysinfo (SI_ISALIST, buf, 1024))
- g_assert_not_reached ();
-#else
- /* From glibc. If the getpagesize is 8192, we're on sparc64, which
- * (in)directly implies that we're a v9 or better.
- * Improvements to this are greatly accepted...
- * Also, we don't differentiate between v7 and v8. I sense SIGILL
- * sniffing in my future.
- */
- if (getpagesize() == 8192)
- strcpy (buf, "sparcv9");
- else
- strcpy (buf, "sparcv8");
-#endif
-
- /*
+ /*
* On some processors, the cmov instructions are even slower than the
* normal ones...
*/
- if (strstr (buf, "sparcv9")) {
+ if (mono_hwcap_sparc_is_v9)
opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
- sparcv9 = TRUE;
- }
else
*exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
return opts;
}
+/*
+ * This function tests which SIMD versions are supported.
+ *
+ * Returns a bitmask corresponding to all supported versions.
+ *
+ */
+guint32
+mono_arch_cpu_enumerate_simd_versions (void)
+{
+ /* SIMD is currently unimplemented */
+ return 0;
+}
+
#ifdef __GNUC__
#define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
#else /* assume Sun's compiler */
*
* Sparcv8 needs a flush every 8 bytes.
*/
- align = (sparcv9 ? 32 : 8);
+ align = (mono_hwcap_sparc_is_v9 ? 32 : 8);
start &= ~(align - 1);
end = (end + (align - 1)) & ~(align - 1);
gboolean
mono_sparc_is_v9 (void) {
- return sparcv9;
+ return mono_hwcap_sparc_is_v9;
}
gboolean
/* The address of the return value is passed in %o0 */
add_general (&gr, &stack_size, &cinfo->ret, FALSE);
cinfo->ret.reg += sparc_i0;
+ /* FIXME: Pass this after the other arguments, as on other platforms */
+ NOT_IMPLEMENTED;
}
#endif
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
MONO_PATCH_INFO_EXC, sexc_name); \
- if (sparcv9 && ((icc) != sparc_icc_short)) { \
+ if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
sparc_branchp (code, 0, (cond), (icc), 0, 0); \
} \
else { \
((ins->inst_offset == last_ins->inst_offset - 4)) &&
(ins->inst_imm == 0) &&
(last_ins->inst_imm == 0)) {
- if (sparcv9) {
+ if (mono_hwcap_sparc_is_v9) {
last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
last_ins->inst_offset = ins->inst_offset;
MONO_DELETE_INS (bb, ins);
return FALSE;
}
-/*
- * mono_arch_get_vcall_slot:
- *
- * Determine the vtable slot used by a virtual call.
- */
-gpointer
-mono_arch_get_vcall_slot (guint8 *code8, mgreg_t *regs, int *displacement)
-{
- guint32 *code = (guint32*)(gpointer)code8;
- guint32 ins = code [0];
- guint32 prev_ins = code [-1];
-
- mono_sparc_flushw ();
-
- *displacement = 0;
-
- if (!mono_sparc_is_virtual_call (code))
- return NULL;
-
- if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
- if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
- /* ld [r1 + CONST ], r2; call r2 */
- guint32 base = sparc_inst_rs1 (prev_ins);
- gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
- gpointer base_val;
-
- g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
-
- g_assert ((base >= sparc_o0) && (base <= sparc_i7));
-
- base_val = regs [base];
-
- *displacement = disp;
-
- return (gpointer)base_val;
- }
- else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
- /* set r1, ICONST; ld [r1 + r2], r2; call r2 */
- /* Decode a sparc_set32 */
- guint32 base = sparc_inst_rs1 (prev_ins);
- guint32 disp;
- gpointer base_val;
- guint32 s1 = code [-3];
- guint32 s2 = code [-2];
-
-#ifdef SPARCV9
- NOT_IMPLEMENTED;
-#endif
-
- /* sparc_sethi */
- g_assert (sparc_inst_op (s1) == 0);
- g_assert (sparc_inst_op2 (s1) == 4);
-
- /* sparc_or_imm */
- g_assert (sparc_inst_op (s2) == 2);
- g_assert (sparc_inst_op3 (s2) == 2);
- g_assert (sparc_inst_i (s2) == 1);
- g_assert (sparc_inst_rs1 (s2) == sparc_inst_rd (s2));
- g_assert (sparc_inst_rd (s1) == sparc_inst_rs1 (s2));
-
- disp = ((s1 & 0x3fffff) << 10) | sparc_inst_imm13 (s2);
-
- g_assert ((base >= sparc_o0) && (base <= sparc_i7));
-
- base_val = regs [base];
-
- *displacement = disp;
-
- return (gpointer)base_val;
- } else
- g_assert_not_reached ();
- }
- else
- g_assert_not_reached ();
-
- return NULL;
-}
-
#define CMP_SIZE 3
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 2
item->jmp_code = (guint8*)code;
sparc_branch (code, 0, sparc_bne, 0);
sparc_nop (code);
- sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
- sparc_ld (code, sparc_g5, 0, sparc_g5);
+ if (item->has_target_code) {
+ /* The jmpl below dispatches through %g5, so the target
+  * address must be loaded into the integer register g5,
+  * not the FP register f5. */
+ sparc_set (code, item->value.target_code, sparc_g5);
+ } else {
+ sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
+ sparc_ld (code, sparc_g5, 0, sparc_g5);
+ }
sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
sparc_nop (code);
}
gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
mono_sparc_flushw ();
sparc_branch (code, 0, sparc_be, 0);
/* delay slot */
sparc_set (code, 0, sparc_o7);
- sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
+ sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg);
/* start of loop */
br [1] = code;
- if (sparcv9)
+ if (mono_hwcap_sparc_is_v9)
sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
else
sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
sparc_branch (code, 0, sparc_bl, 0);
sparc_patch (br [2], br [1]);
/* delay slot */
- sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
+ sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
sparc_patch (br [0], code);
}
break;
if (offset <= 16) {
i = 0;
while (i < offset) {
- if (sparcv9) {
+ if (mono_hwcap_sparc_is_v9) {
sparc_stx_imm (code, sparc_g0, ins->dreg, i);
i += 8;
}
}
else {
sparc_set (code, offset, sparc_o7);
- sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
+ sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
/* beginning of loop */
br [0] = code;
- if (sparcv9)
+ if (mono_hwcap_sparc_is_v9)
sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
else
sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
br [1] = code;
sparc_branch (code, 0, sparc_bne, 0);
/* delay slot */
- sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
+ sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
sparc_patch (br [1], br [0]);
}
}
case OP_IBGE_UN:
case OP_IBLE:
case OP_IBLE_UN: {
- if (sparcv9)
+ if (mono_hwcap_sparc_is_v9)
EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
else
EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
+mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
code = (guint32*)(cfg->native_code + cfg->code_len);
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
code = (guint32*)(cfg->native_code + cfg->code_len);
#endif
void
-mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
+mono_arch_tls_init (void)
{
+ MonoJitTlsData *jit_tls;
+
if (!lmf_addr_key_inited) {
int res;
}
+ jit_tls = mono_get_jit_tls ();
+
#ifdef MONO_SPARC_THR_TLS
- thr_setspecific (lmf_addr_key, &tls->lmf);
+ thr_setspecific (lmf_addr_key, &jit_tls->lmf);
#else
- pthread_setspecific (lmf_addr_key, &tls->lmf);
+ pthread_setspecific (lmf_addr_key, &jit_tls->lmf);
#endif
}
+void
+mono_arch_finish_init (void)
+{
+}
+
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
* Returns the size of the activation frame.
*/
int
-mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k, align;
CallInfo *cinfo;
return 0;
}
-MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
-{
- return NULL;
-}
-
-gpointer
+mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
/* FIXME: implement */
g_assert_not_reached ();
}
+
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ return FALSE;
+}