Authors: Andreas Krall
Reinhard Grafl
- $Id: codegen.c 865 2004-01-07 19:09:47Z edwin $
+ $Id: codegen.c 1146 2004-06-06 12:48:10Z twisti $
*/
+#include "global.h"
#include <stdio.h>
#include <signal.h>
#include "types.h"
-#include "codegen.h"
-#include "jit.h"
-#include "parse.h"
-#include "reg.h"
+#include "main.h"
+#include "jit/alpha/codegen.h"
+#include "jit/jit.h"
+#include "jit/parse.h"
+#include "jit/reg.h"
#include "builtin.h"
#include "asmpart.h"
#include "jni.h"
#include "main.h"
/* include independent code generation stuff */
-#include "codegen.inc"
-#include "reg.inc"
+#include "jit/codegen.inc"
+#include "jit/reg.inc"
/* *****************************************************************************
/* gen_nullptr_check(objreg) */
-#ifdef SOFTNULLPTRCHECK
#define gen_nullptr_check(objreg) \
- if (checknull) {\
- M_BEQZ((objreg), 0);\
- codegen_addxnullrefs(mcodeptr);\
- }
-#else
-#define gen_nullptr_check(objreg)
-#endif
+ if (checknull) { \
+ M_BEQZ((objreg), 0); \
+ codegen_addxnullrefs(mcodeptr); \
+ }
/* MCODECHECK(icnt) */
if a and b are the same int-register, no code will be generated.
*/
-#define M_INTMOVE(a,b) if(a!=b){M_MOV(a,b);}
+#define M_INTMOVE(a,b) if (a != b) { M_MOV(a, b); }
/* M_FLTMOVE:
if a and b are the same float-register, no code will be generated
*/
-#define M_FLTMOVE(a,b) if(a!=b){M_FMOV(a,b);}
+#define M_FLTMOVE(a,b) if (a != b) { M_FMOV(a, b); }
/* var_to_reg_xxx:
} sigctx_struct;
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+void thread_restartcriticalsection(ucontext_t *uc)
+{
+ void *critical;
+ if ((critical = thread_checkcritical((void*) uc->uc_mcontext.sc_pc)) != NULL)
+ uc->uc_mcontext.sc_pc = (u8) critical;
+}
+#endif
+
/* NullPointerException signal handler for hardware null pointer check */
void catch_NullPointerException(int sig, int code, sigctx_struct *sigctx)
sigset_t nsig;
int instr;
long faultaddr;
+ java_objectheader *xptr;
/* Reset signal handler - necessary for SysV, does no harm for BSD */
sigemptyset(&nsig);
sigaddset(&nsig, sig);
sigprocmask(SIG_UNBLOCK, &nsig, NULL); /* unblock signal */
- sigctx->sc_regs[REG_ITMP1_XPTR] =
- (long) proto_java_lang_NullPointerException;
+
+ xptr = new_exception(string_java_lang_NullPointerException);
+
+ sigctx->sc_regs[REG_ITMP1_XPTR] = (u8) xptr;
sigctx->sc_regs[REG_ITMP2_XPC] = sigctx->sc_pc;
- sigctx->sc_pc = (long) asm_handle_exception;
+ sigctx->sc_pc = (u8) asm_handle_exception;
return;
- }
- else {
+
+ } else {
faultaddr += (long) ((instr << 16) >> 16);
- fprintf(stderr, "faulting address: 0x%16lx\n", faultaddr);
+ fprintf(stderr, "faulting address: 0x%016lx\n", faultaddr);
panic("Stack overflow");
- }
+ }
}
/* install signal handlers we need to convert to exceptions */
if (!checknull) {
-
#if defined(SIGSEGV)
signal(SIGSEGV, (void*) catch_NullPointerException);
#endif
#if defined(SIGBUS)
signal(SIGBUS, (void*) catch_NullPointerException);
#endif
- }
+ }
}
*******************************************************************************/
-#define MethodPointer -8
-#define FrameSize -12
-#define IsSync -16
-#define IsLeaf -20
-#define IntSave -24
-#define FltSave -28
-#define ExTableSize -32
-#define ExTableStart -32
-
-#define ExEntrySize -32
-#define ExStartPC -8
-#define ExEndPC -16
-#define ExHandlerPC -24
-#define ExCatchType -32
-
void codegen()
{
int len, s1, s2, s3, d;
parentargs_base = maxmemuse + savedregs_num;
-#ifdef USE_THREADS /* space to save argument of monitor_enter */
+#if defined(USE_THREADS) /* space to save argument of monitor_enter */
if (checksync && (method->flags & ACC_SYNCHRONIZED))
parentargs_base++;
(void) dseg_addaddress(method); /* MethodPointer */
(void) dseg_adds4(parentargs_base * 8); /* FrameSize */
-#ifdef USE_THREADS
+#if defined(USE_THREADS)
/* IsSync contains the offset relative to the stack pointer for the
argument of monitor_exit used in the exception handler. Since the
(void) dseg_adds4(isleafmethod); /* IsLeaf */
(void) dseg_adds4(savintregcnt - maxsavintreguse); /* IntSave */
(void) dseg_adds4(savfltregcnt - maxsavfltreguse); /* FltSave */
+
+ dseg_addlinenumbertablesize();
+
+
(void) dseg_adds4(exceptiontablelength); /* ExTableSize */
/* create exception table */
for (ex = extable; ex != NULL; ex = ex->down) {
-
-#ifdef LOOP_DEBUG
- if (ex->start != NULL)
- printf("adding start - %d - ", ex->start->debug_nr);
- else {
- printf("PANIC - start is NULL");
- exit(-1);
- }
-#endif
-
dseg_addtarget(ex->start);
-
-#ifdef LOOP_DEBUG
- if (ex->end != NULL)
- printf("adding end - %d - ", ex->end->debug_nr);
- else {
- printf("PANIC - end is NULL");
- exit(-1);
- }
-#endif
-
dseg_addtarget(ex->end);
-
-#ifdef LOOP_DEBUG
- if (ex->handler != NULL)
- printf("adding handler - %d\n", ex->handler->debug_nr);
- else {
- printf("PANIC - handler is NULL");
- exit(-1);
- }
-#endif
-
dseg_addtarget(ex->handler);
-
(void) dseg_addaddress(ex->catchtype);
- }
+ }
/* initialize mcode variables */
/* save monitorenter argument */
-#ifdef USE_THREADS
+#if defined(USE_THREADS)
if (checksync && (method->flags & ACC_SYNCHRONIZED)) {
if (method->flags & ACC_STATIC) {
- p = dseg_addaddress (class);
+ p = dseg_addaddress(class);
M_ALD(REG_ITMP1, REG_PV, p);
- M_AST(REG_ITMP1, REG_SP, 8 * maxmemuse);
- }
- else {
- M_AST (argintregs[0], REG_SP, 8 * maxmemuse);
- }
- }
+ M_AST(REG_ITMP1, REG_SP, maxmemuse * 8);
+
+ } else {
+ M_AST (argintregs[0], REG_SP, maxmemuse * 8);
+ }
+ }
#endif
/* copy argument registers to stack and call trace function with pointer
- to arguments on stack. ToDo: save floating point registers !!!!!!!!!
+ to arguments on stack.
*/
if (runverbose) {
- int disp;
- M_LDA (REG_SP, REG_SP, -(14*8));
- M_AST(REG_RA, REG_SP, 1*8);
-
- M_LST(argintregs[0], REG_SP, 2*8);
- M_LST(argintregs[1], REG_SP, 3*8);
- M_LST(argintregs[2], REG_SP, 4*8);
- M_LST(argintregs[3], REG_SP, 5*8);
- M_LST(argintregs[4], REG_SP, 6*8);
- M_LST(argintregs[5], REG_SP, 7*8);
-
- M_DST(argfltregs[0], REG_SP, 8*8);
- M_DST(argfltregs[1], REG_SP, 9*8);
- M_DST(argfltregs[2], REG_SP, 10*8);
- M_DST(argfltregs[3], REG_SP, 11*8);
- M_DST(argfltregs[4], REG_SP, 12*8);
- M_DST(argfltregs[5], REG_SP, 13*8);
-
- p = dseg_addaddress (method);
+ s4 disp;
+ M_LDA(REG_SP, REG_SP, -((INT_ARG_CNT + FLT_ARG_CNT + 2) * 8));
+ M_AST(REG_RA, REG_SP, 1 * 8);
+
+ /* save integer argument registers */
+ for (p = 0; /* p < mparamcount && */ p < INT_ARG_CNT; p++) {
+ M_LST(argintregs[p], REG_SP, (2 + p) * 8);
+ }
+
+ /* save and copy float arguments into integer registers */
+ for (p = 0; /* p < mparamcount && */ p < FLT_ARG_CNT; p++) {
+ t = mparamtypes[p];
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ if (IS_2_WORD_TYPE(t)) {
+ M_DST(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_FST(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+
+ M_LLD(argintregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_DST(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+ }
+
+ p = dseg_addaddress(method);
M_ALD(REG_ITMP1, REG_PV, p);
- M_AST(REG_ITMP1, REG_SP, 0);
- p = dseg_addaddress ((void*) (builtin_trace_args));
+ M_AST(REG_ITMP1, REG_SP, 0 * 8);
+ p = dseg_addaddress((void *) builtin_trace_args);
M_ALD(REG_PV, REG_PV, p);
M_JSR(REG_RA, REG_PV);
- disp = -(int)((u1*) mcodeptr - mcodebase);
+ disp = -(int) ((u1 *) mcodeptr - mcodebase);
M_LDA(REG_PV, REG_RA, disp);
- M_ALD(REG_RA, REG_SP, 1*8);
-
- M_LLD(argintregs[0], REG_SP, 2*8);
- M_LLD(argintregs[1], REG_SP, 3*8);
- M_LLD(argintregs[2], REG_SP, 4*8);
- M_LLD(argintregs[3], REG_SP, 5*8);
- M_LLD(argintregs[4], REG_SP, 6*8);
- M_LLD(argintregs[5], REG_SP, 7*8);
-
- M_DLD(argfltregs[0], REG_SP, 8*8);
- M_DLD(argfltregs[1], REG_SP, 9*8);
- M_DLD(argfltregs[2], REG_SP, 10*8);
- M_DLD(argfltregs[3], REG_SP, 11*8);
- M_DLD(argfltregs[4], REG_SP, 12*8);
- M_DLD(argfltregs[5], REG_SP, 13*8);
-
- M_LDA (REG_SP, REG_SP, 14*8);
+ M_ALD(REG_RA, REG_SP, 1 * 8);
+
+ for (p = 0; /* p < mparamcount && */ p < INT_ARG_CNT; p++) {
+ M_LLD(argintregs[p], REG_SP, (2 + p) * 8);
}
+ for (p = 0; /* p < mparamcount && */ p < FLT_ARG_CNT; p++) {
+ t = mparamtypes[p];
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ if (IS_2_WORD_TYPE(t)) {
+ M_DLD(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_FLD(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+
+ } else {
+ M_DLD(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+ }
+
+ M_LDA(REG_SP, REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + 2) * 8);
+ }
+
/* take arguments out of register or stack frame */
for (p = 0, l = 0; p < mparamcount; p++) {
}
} /* end for */
- /* call trace function */
-
-#if 0
- if (runverbose && !isleafmethod) {
- M_LDA (REG_SP, REG_SP, -8);
- p = dseg_addaddress (method);
- M_ALD(REG_ITMP1, REG_PV, p);
- M_AST(REG_ITMP1, REG_SP, 0);
- p = dseg_addaddress ((void*) (builtin_trace_args));
- M_ALD(REG_PV, REG_PV, p);
- M_JSR(REG_RA, REG_PV);
- M_LDA(REG_PV, REG_RA, -(int)((u1*) mcodeptr - mcodebase));
- M_LDA(REG_SP, REG_SP, 8);
- }
-#endif
-
/* call monitorenter function */
-#ifdef USE_THREADS
+#if defined(USE_THREADS)
if (checksync && (method->flags & ACC_SYNCHRONIZED)) {
int disp;
- p = dseg_addaddress ((void*) (builtin_monitorenter));
+ p = dseg_addaddress((void*) (builtin_monitorenter));
M_ALD(REG_PV, REG_PV, p);
- M_ALD(argintregs[0], REG_SP, 8 * maxmemuse);
+ M_ALD(argintregs[0], REG_SP, maxmemuse * 8);
M_JSR(REG_RA, REG_PV);
disp = -(int)((u1*) mcodeptr - mcodebase);
M_LDA(REG_PV, REG_RA, disp);
- }
+ }
#endif
}
case ICMD_FCONST: /* ... ==> ..., constant */
/* op1 = 0, val.f = constant */
- d = reg_of_var (iptr->dst, REG_FTMP1);
- a = dseg_addfloat (iptr->val.f);
+ d = reg_of_var(iptr->dst, REG_FTMP1);
+ a = dseg_addfloat(iptr->val.f);
M_FLD(d, REG_PV, a);
- store_reg_to_var_flt (iptr->dst, d);
+ store_reg_to_var_flt(iptr->dst, d);
break;
case ICMD_DCONST: /* ... ==> ..., constant */
/* op1 = 0, val.d = constant */
- d = reg_of_var (iptr->dst, REG_FTMP1);
- a = dseg_adddouble (iptr->val.d);
+ d = reg_of_var(iptr->dst, REG_FTMP1);
+ a = dseg_adddouble(iptr->val.d);
M_DLD(d, REG_PV, a);
- store_reg_to_var_flt (iptr->dst, d);
+ store_reg_to_var_flt(iptr->dst, d);
break;
case ICMD_ACONST: /* ... ==> ..., constant */
var_to_reg_flt(s1, src, REG_FTMP1);
d = reg_of_var(iptr->dst, REG_FTMP3);
- M_FLTMOVE(s1, d);
+ M_CVTFDS(s1, d);
+ M_TRAPB;
store_reg_to_var_flt(iptr->dst, d);
break;
- case ICMD_D2F: /* ..., value ==> ..., (double) value */
+ case ICMD_D2F: /* ..., value ==> ..., (float) value */
var_to_reg_flt(s1, src, REG_FTMP1);
d = reg_of_var(iptr->dst, REG_FTMP3);
/* memory operations **************************************************/
- /* #define gen_bound_check \
- if (checkbounds) {\
- M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size));\
- M_CMPULT(s2, REG_ITMP3, REG_ITMP3);\
- M_BEQZ(REG_ITMP3, 0);\
- codegen_addxboundrefs(mcodeptr);\
- }
- */
-
#define gen_bound_check \
- if (checkbounds) { \
- M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size));\
- M_CMPULT(s2, REG_ITMP3, REG_ITMP3);\
- M_BEQZ(REG_ITMP3, 0);\
- codegen_addxboundrefs(mcodeptr); \
- }
+ if (checkbounds) { \
+ M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size));\
+ M_CMPULT(s2, REG_ITMP3, REG_ITMP3);\
+ M_BEQZ(REG_ITMP3, 0);\
+ codegen_addxboundrefs(mcodeptr, s2); \
+ }
case ICMD_ARRAYLENGTH: /* ..., arrayref ==> ..., length */
/* recompute pv */
s1 = (int) ((u1*) mcodeptr - mcodebase);
- if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
- else {
+ if (s1 <= 32768) {
+ M_LDA(REG_PV, REG_RA, -s1);
+ M_NOP;
+
+ } else {
s4 ml = -s1, mh = 0;
while (ml < -32768) { ml += 65536; mh--; }
M_LDA(REG_PV, REG_RA, ml);
/* recompute pv */
s1 = (int) ((u1*) mcodeptr - mcodebase);
- if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
- else {
+ if (s1 <= 32768) {
+ M_LDA(REG_PV, REG_RA, -s1);
+ M_NOP;
+
+ } else {
s4 ml = -s1, mh = 0;
while (ml < -32768) { ml += 65536; mh--; }
M_LDA(REG_PV, REG_RA, ml);
case ICMD_LRETURN:
case ICMD_ARETURN:
-#ifdef USE_THREADS
+ var_to_reg_int(s1, src, REG_RESULT);
+ M_INTMOVE(s1, REG_RESULT);
+
+#if defined(USE_THREADS)
if (checksync && (method->flags & ACC_SYNCHRONIZED)) {
- int disp;
- a = dseg_addaddress ((void*) (builtin_monitorexit));
+ s4 disp;
+ a = dseg_addaddress((void *) (builtin_monitorexit));
M_ALD(REG_PV, REG_PV, a);
- M_ALD(argintregs[0], REG_SP, 8 * maxmemuse);
+ M_ALD(argintregs[0], REG_SP, maxmemuse * 8);
+ M_LST(REG_RESULT, REG_SP, maxmemuse * 8);
M_JSR(REG_RA, REG_PV);
- disp = -(int)((u1*) mcodeptr - mcodebase);
+ disp = -(s4) ((u1 *) mcodeptr - mcodebase);
M_LDA(REG_PV, REG_RA, disp);
- }
+ M_LLD(REG_RESULT, REG_SP, maxmemuse * 8);
+ }
#endif
- var_to_reg_int(s1, src, REG_RESULT);
- M_INTMOVE(s1, REG_RESULT);
+
goto nowperformreturn;
case ICMD_FRETURN: /* ..., retvalue ==> ... */
case ICMD_DRETURN:
-#ifdef USE_THREADS
+ var_to_reg_flt(s1, src, REG_FRESULT);
+ M_FLTMOVE(s1, REG_FRESULT);
+
+#if defined(USE_THREADS)
if (checksync && (method->flags & ACC_SYNCHRONIZED)) {
- int disp;
- a = dseg_addaddress ((void*) (builtin_monitorexit));
+ s4 disp;
+ a = dseg_addaddress((void *) (builtin_monitorexit));
M_ALD(REG_PV, REG_PV, a);
- M_ALD(argintregs[0], REG_SP, 8 * maxmemuse);
+ M_ALD(argintregs[0], REG_SP, maxmemuse * 8);
+ M_DST(REG_FRESULT, REG_SP, maxmemuse * 8);
M_JSR(REG_RA, REG_PV);
- disp = -(int)((u1*) mcodeptr - mcodebase);
+ disp = -(s4) ((u1 *) mcodeptr - mcodebase);
M_LDA(REG_PV, REG_RA, disp);
- }
+ M_DLD(REG_FRESULT, REG_SP, maxmemuse * 8);
+ }
#endif
- var_to_reg_flt(s1, src, REG_FRESULT);
- M_FLTMOVE(s1, REG_FRESULT);
+
goto nowperformreturn;
case ICMD_RETURN: /* ... ==> ... */
-#ifdef USE_THREADS
+#if defined(USE_THREADS)
if (checksync && (method->flags & ACC_SYNCHRONIZED)) {
- int disp;
- a = dseg_addaddress ((void*) (builtin_monitorexit));
+ s4 disp;
+ a = dseg_addaddress((void *) (builtin_monitorexit));
M_ALD(REG_PV, REG_PV, a);
- M_ALD(argintregs[0], REG_SP, 8 * maxmemuse);
+ M_ALD(argintregs[0], REG_SP, maxmemuse * 8);
M_JSR(REG_RA, REG_PV);
- disp = -(int)((u1*) mcodeptr - mcodebase);
+ disp = -(s4) ((u1 *) mcodeptr - mcodebase);
M_LDA(REG_PV, REG_RA, disp);
- }
+ }
#endif
nowperformreturn:
/* call trace function */
if (runverbose) {
- M_LDA (REG_SP, REG_SP, -24);
- M_AST(REG_RA, REG_SP, 0);
- M_LST(REG_RESULT, REG_SP, 8);
- M_DST(REG_FRESULT, REG_SP,16);
- a = dseg_addaddress (method);
+ M_LDA(REG_SP, REG_SP, -3 * 8);
+ M_AST(REG_RA, REG_SP, 0 * 8);
+ M_LST(REG_RESULT, REG_SP, 1 * 8);
+ M_DST(REG_FRESULT, REG_SP, 2 * 8);
+ a = dseg_addaddress(method);
M_ALD(argintregs[0], REG_PV, a);
M_MOV(REG_RESULT, argintregs[1]);
M_FLTMOVE(REG_FRESULT, argfltregs[2]);
M_FLTMOVE(REG_FRESULT, argfltregs[3]);
- a = dseg_addaddress ((void*) (builtin_displaymethodstop));
+ a = dseg_addaddress((void *) builtin_displaymethodstop);
M_ALD(REG_PV, REG_PV, a);
- M_JSR (REG_RA, REG_PV);
- s1 = (int)((u1*) mcodeptr - mcodebase);
- if (s1<=32768) M_LDA (REG_PV, REG_RA, -s1);
+ M_JSR(REG_RA, REG_PV);
+ s1 = (int) ((u1 *) mcodeptr - mcodebase);
+ if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
else {
- s4 ml=-s1, mh=0;
- while (ml<-32768) { ml+=65536; mh--; }
- M_LDA (REG_PV, REG_RA, ml );
- M_LDAH (REG_PV, REG_PV, mh );
- }
- M_DLD(REG_FRESULT, REG_SP,16);
- M_LLD(REG_RESULT, REG_SP, 8);
- M_ALD(REG_RA, REG_SP, 0);
- M_LDA (REG_SP, REG_SP, 24);
+ s4 ml = -s1, mh = 0;
+ while (ml < -32768) { ml += 65536; mh--; }
+ M_LDA(REG_PV, REG_RA, ml);
+ M_LDAH(REG_PV, REG_PV, mh);
}
+ M_DLD(REG_FRESULT, REG_SP, 2 * 8);
+ M_LLD(REG_RESULT, REG_SP, 1 * 8);
+ M_ALD(REG_RA, REG_SP, 0 * 8);
+ M_LDA(REG_SP, REG_SP, 3 * 8);
+ }
M_RET(REG_ZERO, REG_RA);
ALIGNCODENOP;
{
classinfo *super = (classinfo*) iptr->val.a;
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritrestart((u1*) mcodeptr - mcodebase);
+#endif
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(iptr->dst, REG_ITMP3);
if (s1 == d) {
M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
a = dseg_addaddress ((void*) super->vftbl);
M_ALD(REG_ITMP2, REG_PV, a);
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritstart((u1*) mcodeptr - mcodebase);
+#endif
M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl, baseval));
M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl, baseval));
M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl, diffval));
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritstop((u1*) mcodeptr - mcodebase);
+#endif
M_ISUB(REG_ITMP1, REG_ITMP3, REG_ITMP1);
M_CMPULE(REG_ITMP1, REG_ITMP2, d);
}
{
classinfo *super = (classinfo*) iptr->val.a;
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritrestart((u1*) mcodeptr - mcodebase);
+#endif
d = reg_of_var(iptr->dst, REG_ITMP3);
var_to_reg_int(s1, src, d);
if (iptr->op1) { /* class/interface */
M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
a = dseg_addaddress ((void*) super->vftbl);
M_ALD(REG_ITMP2, REG_PV, a);
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritstart((u1*) mcodeptr - mcodebase);
+#endif
M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl, baseval));
if (d != REG_ITMP3) {
M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl, baseval));
M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl, diffval));
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritstop((u1*) mcodeptr - mcodebase);
+#endif
M_ISUB(REG_ITMP1, REG_ITMP3, REG_ITMP1);
}
else {
M_ISUB(REG_ITMP1, REG_ITMP2, REG_ITMP1);
M_ALD(REG_ITMP2, REG_PV, a);
M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl, diffval));
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ codegen_threadcritstop((u1*) mcodeptr - mcodebase);
+#endif
}
M_CMPULE(REG_ITMP1, REG_ITMP2, REG_ITMP2);
M_BEQZ(REG_ITMP2, 0);
codegen_addxcheckarefs(mcodeptr);
break;
+ case ICMD_CHECKEXCEPTION: /* ... ==> ... */
+
+ M_BEQZ(REG_RESULT, 0);
+ codegen_addxexceptionrefs(mcodeptr);
+ break;
+
case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
/* op1 = dimension, val.a = array descriptor */
s4 *xcodeptr = NULL;
for (; xboundrefs != NULL; xboundrefs = xboundrefs->next) {
- if ((exceptiontablelength == 0) && (xcodeptr != NULL)) {
- gen_resolvebranch((u1*) mcodebase + xboundrefs->branchpos,
- xboundrefs->branchpos, (u1*) xcodeptr - (u1*) mcodebase - 4);
- continue;
- }
-
-
gen_resolvebranch((u1*) mcodebase + xboundrefs->branchpos,
- xboundrefs->branchpos, (u1*) mcodeptr - mcodebase);
+ xboundrefs->branchpos,
+ (u1*) mcodeptr - mcodebase);
MCODECHECK(8);
+ /* move index register into REG_ITMP1 */
+ M_MOV(xboundrefs->reg, REG_ITMP1);
M_LDA(REG_ITMP2_XPC, REG_PV, xboundrefs->branchpos - 4);
if (xcodeptr != NULL) {
- int disp = (xcodeptr-mcodeptr)-1;
- M_BR(disp);
- }
- else {
+ M_BR(xcodeptr - mcodeptr - 1);
+
+ } else {
xcodeptr = mcodeptr;
- a = dseg_addaddress(proto_java_lang_ArrayIndexOutOfBoundsException);
- M_ALD(REG_ITMP1_XPTR, REG_PV, a);
+ M_LSUB_IMM(REG_SP, 1 * 8, REG_SP);
+ M_LST(REG_ITMP2_XPC, REG_SP, 0 * 8);
+
+ a = dseg_addaddress(string_java_lang_ArrayIndexOutOfBoundsException);
+ M_ALD(argintregs[0], REG_PV, a);
+ M_MOV(REG_ITMP1, argintregs[1]);
+
+ a = dseg_addaddress(new_exception_int);
+ M_ALD(REG_PV, REG_PV, a);
+ M_JSR(REG_RA, REG_PV);
+
+ /* recompute pv */
+ s1 = (s4) ((u1 *) mcodeptr - mcodebase);
+ if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
+ else {
+ s4 ml = -s1, mh = 0;
+ while (ml < -32768) { ml += 65536; mh--; }
+ M_LDA(REG_PV, REG_RA, ml);
+ M_LDAH(REG_PV, REG_PV, mh);
+ }
+
+ M_MOV(REG_RESULT, REG_ITMP1_XPTR);
+
+ M_LLD(REG_ITMP2_XPC, REG_SP, 0 * 8);
+ M_LADD_IMM(REG_SP, 1 * 8, REG_SP);
a = dseg_addaddress(asm_handle_exception);
M_ALD(REG_ITMP3, REG_PV, a);
M_JMP(REG_ZERO, REG_ITMP3);
- }
}
+ }
/* generate negative array size check stubs */
for (; xcheckarefs != NULL; xcheckarefs = xcheckarefs->next) {
if ((exceptiontablelength == 0) && (xcodeptr != NULL)) {
gen_resolvebranch((u1*) mcodebase + xcheckarefs->branchpos,
- xcheckarefs->branchpos, (u1*) xcodeptr - (u1*) mcodebase - 4);
+ xcheckarefs->branchpos,
+ (u1*) xcodeptr - (u1*) mcodebase - 4);
continue;
- }
+ }
gen_resolvebranch((u1*) mcodebase + xcheckarefs->branchpos,
- xcheckarefs->branchpos, (u1*) mcodeptr - mcodebase);
+ xcheckarefs->branchpos,
+ (u1*) mcodeptr - mcodebase);
MCODECHECK(8);
M_LDA(REG_ITMP2_XPC, REG_PV, xcheckarefs->branchpos - 4);
if (xcodeptr != NULL) {
- int disp = (xcodeptr-mcodeptr)-1;
- M_BR(disp);
- }
- else {
+ M_BR(xcodeptr - mcodeptr - 1);
+
+ } else {
xcodeptr = mcodeptr;
- a = dseg_addaddress(proto_java_lang_NegativeArraySizeException);
- M_ALD(REG_ITMP1_XPTR, REG_PV, a);
+ M_LSUB_IMM(REG_SP, 1 * 8, REG_SP);
+ M_LST(REG_ITMP2_XPC, REG_SP, 0 * 8);
+
+ a = dseg_addaddress(string_java_lang_NegativeArraySizeException);
+ M_ALD(argintregs[0], REG_PV, a);
+
+ a = dseg_addaddress(new_exception);
+ M_ALD(REG_PV, REG_PV, a);
+ M_JSR(REG_RA, REG_PV);
+
+ /* recompute pv */
+ s1 = (s4) ((u1 *) mcodeptr - mcodebase);
+ if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
+ else {
+ s4 ml = -s1, mh = 0;
+ while (ml < -32768) { ml += 65536; mh--; }
+ M_LDA(REG_PV, REG_RA, ml);
+ M_LDAH(REG_PV, REG_PV, mh);
+ }
+
+ M_MOV(REG_RESULT, REG_ITMP1_XPTR);
+
+ M_LLD(REG_ITMP2_XPC, REG_SP, 0 * 8);
+ M_LADD_IMM(REG_SP, 1 * 8, REG_SP);
a = dseg_addaddress(asm_handle_exception);
M_ALD(REG_ITMP3, REG_PV, a);
M_JMP(REG_ZERO, REG_ITMP3);
- }
}
+ }
/* generate cast check stubs */
for (; xcastrefs != NULL; xcastrefs = xcastrefs->next) {
if ((exceptiontablelength == 0) && (xcodeptr != NULL)) {
gen_resolvebranch((u1*) mcodebase + xcastrefs->branchpos,
- xcastrefs->branchpos, (u1*) xcodeptr - (u1*) mcodebase - 4);
+ xcastrefs->branchpos,
+ (u1*) xcodeptr - (u1*) mcodebase - 4);
continue;
- }
+ }
gen_resolvebranch((u1*) mcodebase + xcastrefs->branchpos,
- xcastrefs->branchpos, (u1*) mcodeptr - mcodebase);
+ xcastrefs->branchpos,
+ (u1*) mcodeptr - mcodebase);
MCODECHECK(8);
M_LDA(REG_ITMP2_XPC, REG_PV, xcastrefs->branchpos - 4);
if (xcodeptr != NULL) {
- int disp = (xcodeptr-mcodeptr)-1;
- M_BR(disp);
- }
- else {
+ M_BR(xcodeptr - mcodeptr - 1);
+
+ } else {
xcodeptr = mcodeptr;
- a = dseg_addaddress(proto_java_lang_ClassCastException);
- M_ALD(REG_ITMP1_XPTR, REG_PV, a);
+ M_LSUB_IMM(REG_SP, 1 * 8, REG_SP);
+ M_LST(REG_ITMP2_XPC, REG_SP, 0 * 8);
+
+ a = dseg_addaddress(string_java_lang_ClassCastException);
+ M_ALD(argintregs[0], REG_PV, a);
+
+ a = dseg_addaddress(new_exception);
+ M_ALD(REG_PV, REG_PV, a);
+ M_JSR(REG_RA, REG_PV);
+
+ /* recompute pv */
+ s1 = (s4) ((u1 *) mcodeptr - mcodebase);
+ if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
+ else {
+ s4 ml = -s1, mh = 0;
+ while (ml < -32768) { ml += 65536; mh--; }
+ M_LDA(REG_PV, REG_RA, ml);
+ M_LDAH(REG_PV, REG_PV, mh);
+ }
+
+ M_MOV(REG_RESULT, REG_ITMP1_XPTR);
+
+ M_LLD(REG_ITMP2_XPC, REG_SP, 0 * 8);
+ M_LADD_IMM(REG_SP, 1 * 8, REG_SP);
a = dseg_addaddress(asm_handle_exception);
M_ALD(REG_ITMP3, REG_PV, a);
M_JMP(REG_ZERO, REG_ITMP3);
- }
}
+ }
+
+ /* generate exception check stubs */
+
+ xcodeptr = NULL;
+
+ for (; xexceptionrefs != NULL; xexceptionrefs = xexceptionrefs->next) {
+ if ((exceptiontablelength == 0) && (xcodeptr != NULL)) {
+ gen_resolvebranch((u1*) mcodebase + xexceptionrefs->branchpos,
+ xexceptionrefs->branchpos,
+ (u1*) xcodeptr - (u1*) mcodebase - 4);
+ continue;
+ }
+
+ gen_resolvebranch((u1*) mcodebase + xexceptionrefs->branchpos,
+ xexceptionrefs->branchpos,
+ (u1*) mcodeptr - mcodebase);
+
+ MCODECHECK(8);
+
+ M_LDA(REG_ITMP2_XPC, REG_PV, xexceptionrefs->branchpos - 4);
+
+ if (xcodeptr != NULL) {
+ M_BR(xcodeptr - mcodeptr - 1);
+
+ } else {
+ xcodeptr = mcodeptr;
+
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ M_LSUB_IMM(REG_SP, 1 * 8, REG_SP);
+ M_LST(REG_ITMP2_XPC, REG_SP, 0 * 8);
+
+ a = dseg_addaddress(&builtin_get_exceptionptrptr);
+ M_ALD(REG_PV, REG_PV, a);
+ M_JSR(REG_RA, REG_PV);
+
+ /* recompute pv */
+ s1 = (s4) ((u1 *) mcodeptr - mcodebase);
+ if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
+ else {
+ s4 ml = -s1, mh = 0;
+ while (ml < -32768) { ml += 65536; mh--; }
+ M_LDA(REG_PV, REG_RA, ml);
+ M_LDAH(REG_PV, REG_PV, mh);
+ }
+ M_ALD(REG_ITMP1_XPTR, REG_RESULT, 0);
+ M_AST(REG_ZERO, REG_RESULT, 0);
-#ifdef SOFTNULLPTRCHECK
+ M_LLD(REG_ITMP2_XPC, REG_SP, 0 * 8);
+ M_LADD_IMM(REG_SP, 1 * 8, REG_SP);
+#else
+ a = dseg_addaddress(&_exceptionptr);
+ M_ALD(REG_ITMP3, REG_PV, a);
+ M_ALD(REG_ITMP1_XPTR, REG_ITMP3, 0);
+ M_AST(REG_ZERO, REG_ITMP3, 0);
+#endif
+
+ a = dseg_addaddress(asm_handle_exception);
+ M_ALD(REG_ITMP3, REG_PV, a);
+
+ M_JMP(REG_ZERO, REG_ITMP3);
+ }
+ }
/* generate null pointer check stubs */
for (; xnullrefs != NULL; xnullrefs = xnullrefs->next) {
if ((exceptiontablelength == 0) && (xcodeptr != NULL)) {
gen_resolvebranch((u1*) mcodebase + xnullrefs->branchpos,
- xnullrefs->branchpos, (u1*) xcodeptr - (u1*) mcodebase - 4);
+ xnullrefs->branchpos,
+ (u1*) xcodeptr - (u1*) mcodebase - 4);
continue;
- }
+ }
gen_resolvebranch((u1*) mcodebase + xnullrefs->branchpos,
- xnullrefs->branchpos, (u1*) mcodeptr - mcodebase);
+ xnullrefs->branchpos,
+ (u1*) mcodeptr - mcodebase);
MCODECHECK(8);
M_LDA(REG_ITMP2_XPC, REG_PV, xnullrefs->branchpos - 4);
if (xcodeptr != NULL) {
- int disp = (xcodeptr-mcodeptr)-1;
- M_BR(disp);
- }
- else {
+ M_BR(xcodeptr - mcodeptr - 1);
+
+ } else {
xcodeptr = mcodeptr;
- a = dseg_addaddress(proto_java_lang_NullPointerException);
- M_ALD(REG_ITMP1_XPTR, REG_PV, a);
+ M_LSUB_IMM(REG_SP, 1 * 8, REG_SP);
+ M_LST(REG_ITMP2_XPC, REG_SP, 0 * 8);
+
+ a = dseg_addaddress(string_java_lang_NullPointerException);
+ M_ALD(argintregs[0], REG_PV, a);
+
+ a = dseg_addaddress(new_exception);
+ M_ALD(REG_PV, REG_PV, a);
+ M_JSR(REG_RA, REG_PV);
+
+ /* recompute pv */
+ s1 = (s4) ((u1 *) mcodeptr - mcodebase);
+ if (s1 <= 32768) M_LDA(REG_PV, REG_RA, -s1);
+ else {
+ s4 ml = -s1, mh = 0;
+ while (ml < -32768) { ml += 65536; mh--; }
+ M_LDA(REG_PV, REG_RA, ml);
+ M_LDAH(REG_PV, REG_PV, mh);
+ }
+
+ M_MOV(REG_RESULT, REG_ITMP1_XPTR);
+
+ M_LLD(REG_ITMP2_XPC, REG_SP, 0 * 8);
+ M_LADD_IMM(REG_SP, 1 * 8, REG_SP);
a = dseg_addaddress(asm_handle_exception);
M_ALD(REG_ITMP3, REG_PV, a);
M_JMP(REG_ZERO, REG_ITMP3);
- }
}
-
-#endif
+ }
}
codegen_finish((int)((u1*) mcodeptr - mcodebase));
*******************************************************************************/
-#define NATIVESTUBSIZE 60
-#define NATIVESTUBOFFSET 8
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#define NATIVESTUBSTACK 2
+#define NATIVESTUBTHREADEXTRA 5
+#else
+#define NATIVESTUBSTACK 1
+#define NATIVESTUBTHREADEXTRA 0
+#endif
+
+#define NATIVESTUBSIZE (44 + NATIVESTUBTHREADEXTRA)
+#define NATIVESTATICSIZE 5
+#define NATIVEVERBOSESIZE (39 + 13)
+#define NATIVESTUBOFFSET 9
u1 *createnativestub(functionptr f, methodinfo *m)
{
u8 *s; /* memory pointer to hold the stub */
u8 *cs;
s4 *mcodeptr; /* code generation pointer */
- int stackframesize = 0; /* size of stackframe if needed */
- int disp;
+ s4 stackframesize = 0; /* size of stackframe if needed */
+ s4 disp;
+ s4 stubsize;
reg_init();
descriptor2types(m); /* set paramcount and paramtypes */
- s = CNEW(u8, NATIVESTUBSIZE); /* memory to hold the stub */
+ stubsize = NATIVESTUBSIZE; /* calculate nativestub size */
+ if ((m->flags & ACC_STATIC) && !m->class->initialized)
+ stubsize += NATIVESTATICSIZE;
+
+ if (runverbose)
+ stubsize += NATIVEVERBOSESIZE;
+
+ s = CNEW(u8, stubsize); /* memory to hold the stub */
cs = s + NATIVESTUBOFFSET;
mcodeptr = (s4 *) (cs); /* code generation pointer */
*(cs-1) = (u8) f; /* address of native method */
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ *(cs-2) = (u8) &builtin_get_exceptionptrptr;
+#else
*(cs-2) = (u8) (&_exceptionptr); /* address of exceptionptr */
+#endif
*(cs-3) = (u8) asm_handle_nat_exception; /* addr of asm exception handler */
*(cs-4) = (u8) (&env); /* addr of jni_environement */
- *(cs-5) = (u8) asm_builtin_trace;
+ *(cs-5) = (u8) builtin_trace_args;
*(cs-6) = (u8) m;
- *(cs-7) = (u8) asm_builtin_exittrace;
+ *(cs-7) = (u8) builtin_displaymethodstop;
*(cs-8) = (u8) m->class;
+ *(cs-9) = (u8) asm_check_clinit;
+
+ M_LDA(REG_SP, REG_SP, -NATIVESTUBSTACK * 8); /* build up stackframe */
+ M_AST(REG_RA, REG_SP, 0 * 8); /* store return address */
- M_LDA(REG_SP, REG_SP, -8); /* build up stackframe */
- M_AST(REG_RA, REG_SP, 0); /* store return address */
+ /* if function is static, check for initialized */
+ if (m->flags & ACC_STATIC) {
+ /* if class isn't yet initialized, do it */
+ if (!m->class->initialized) {
+ /* call helper function which patches this code */
+ M_ALD(REG_ITMP1, REG_PV, -8 * 8); /* class */
+ M_ALD(REG_PV, REG_PV, -9 * 8); /* asm_check_clinit */
+ M_JSR(REG_RA, REG_PV);
+ disp = -(s4) (mcodeptr - (s4 *) cs) * 4;
+ M_LDA(REG_PV, REG_RA, disp);
+ M_NOP; /* this is essential for code patching */
+ }
+ }
+
+ /* max. 39 instructions */
if (runverbose) {
+ s4 p;
+ s4 t;
+ M_LDA(REG_SP, REG_SP, -((INT_ARG_CNT + FLT_ARG_CNT + 2) * 8));
+ M_AST(REG_RA, REG_SP, 1 * 8);
+
+ /* save integer argument registers */
+ for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
+ M_LST(argintregs[p], REG_SP, (2 + p) * 8);
+ }
+
+ /* save and copy float arguments into integer registers */
+ for (p = 0; p < m->paramcount && p < FLT_ARG_CNT; p++) {
+ t = m->paramtypes[p];
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ if (IS_2_WORD_TYPE(t)) {
+ M_DST(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ M_LLD(argintregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_FST(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ M_ILD(argintregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+
+ } else {
+ M_DST(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+ }
+
M_ALD(REG_ITMP1, REG_PV, -6 * 8);
+ M_AST(REG_ITMP1, REG_SP, 0 * 8);
M_ALD(REG_PV, REG_PV, -5 * 8);
M_JSR(REG_RA, REG_PV);
- disp = -(int) (mcodeptr - (s4*) cs) * 4;
+ disp = -(s4) (mcodeptr - (s4 *) cs) * 4;
M_LDA(REG_PV, REG_RA, disp);
+
+ for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
+ M_LLD(argintregs[p], REG_SP, (2 + p) * 8);
+ }
+
+ for (p = 0; p < m->paramcount && p < FLT_ARG_CNT; p++) {
+ t = m->paramtypes[p];
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ if (IS_2_WORD_TYPE(t)) {
+ M_DLD(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_FLD(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+
+ } else {
+ M_DLD(argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+ }
+
+ M_ALD(REG_RA, REG_SP, 1 * 8);
+ M_LDA(REG_SP, REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + 2) * 8);
}
/* save argument registers on stack -- if we have to */
if ((m->flags & ACC_STATIC && m->paramcount > (INT_ARG_CNT - 2)) || m->paramcount > (INT_ARG_CNT - 1)) {
- int i;
- int paramshiftcnt = (m->flags & ACC_STATIC) ? 2 : 1;
- int stackparamcnt = (m->paramcount > INT_ARG_CNT) ? m->paramcount - INT_ARG_CNT : 0;
+ s4 i;
+ s4 paramshiftcnt = (m->flags & ACC_STATIC) ? 2 : 1;
+ s4 stackparamcnt = (m->paramcount > INT_ARG_CNT) ? m->paramcount - INT_ARG_CNT : 0;
stackframesize = stackparamcnt + paramshiftcnt;
M_ALD(REG_PV, REG_PV, -1 * 8); /* load adress of native method */
M_JSR(REG_RA, REG_PV); /* call native method */
- disp = -(int) (mcodeptr - (s4*) cs) * 4;
+ disp = -(s4) (mcodeptr - (s4 *) cs) * 4;
M_LDA(REG_PV, REG_RA, disp); /* recompute pv from ra */
/* remove stackframe if there is one */
M_LDA(REG_SP, REG_SP, stackframesize * 8);
}
+ /* 13 instructions */
if (runverbose) {
- M_ALD(argintregs[0], REG_PV, -6 * 8);
+ M_LDA(REG_SP, REG_SP, -2 * 8);
+ M_ALD(argintregs[0], REG_PV, -6 * 8); /* load method address */
+ M_LST(REG_RESULT, REG_SP, 0 * 8);
+ M_DST(REG_FRESULT, REG_SP, 1 * 8);
M_MOV(REG_RESULT, argintregs[1]);
M_FMOV(REG_FRESULT, argfltregs[2]);
M_FMOV(REG_FRESULT, argfltregs[3]);
- M_ALD(REG_PV, REG_PV, -7 * 8); /* asm_builtin_exittrace */
+ M_ALD(REG_PV, REG_PV, -7 * 8); /* builtin_displaymethodstop */
M_JSR(REG_RA, REG_PV);
- disp = -(int) (mcodeptr - (s4*) cs) * 4;
+ disp = -(s4) (mcodeptr - (s4 *) cs) * 4;
M_LDA(REG_PV, REG_RA, disp);
+ M_LLD(REG_RESULT, REG_SP, 0 * 8);
+ M_DLD(REG_FRESULT, REG_SP, 1 * 8);
+ M_LDA(REG_SP, REG_SP, 2 * 8);
}
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ if (IS_FLT_DBL_TYPE(m->returntype))
+ M_DST(REG_FRESULT, REG_SP, 1 * 8);
+ else
+ M_AST(REG_RESULT, REG_SP, 1 * 8);
+ M_ALD(REG_PV, REG_PV, -2 * 8); /* builtin_get_exceptionptrptr */
+ M_JSR(REG_RA, REG_PV);
+ disp = -(s4) (mcodeptr - (s4 *) cs) * 4;
+ M_LDA(REG_PV, REG_RA, disp);
+ M_MOV(REG_RESULT, REG_ITMP3);
+ if (IS_FLT_DBL_TYPE(m->returntype))
+ M_DLD(REG_FRESULT, REG_SP, 1 * 8);
+ else
+ M_ALD(REG_RESULT, REG_SP, 1 * 8);
+#else
M_ALD(REG_ITMP3, REG_PV, -2 * 8); /* get address of exceptionptr */
+#endif
M_ALD(REG_ITMP1, REG_ITMP3, 0); /* load exception into reg. itmp1 */
M_BNEZ(REG_ITMP1, 3); /* if no exception then return */
- M_ALD(REG_RA, REG_SP, 0); /* load return address */
- M_LDA(REG_SP, REG_SP, 8); /* remove stackframe */
+ M_ALD(REG_RA, REG_SP, 0 * 8); /* load return address */
+ M_LDA(REG_SP, REG_SP, NATIVESTUBSTACK * 8); /* remove stackframe */
M_RET(REG_ZERO, REG_RA); /* return to caller */
M_AST(REG_ZERO, REG_ITMP3, 0); /* store NULL into exceptionptr */
- M_ALD(REG_RA, REG_SP, 0); /* load return address */
- M_LDA(REG_SP, REG_SP, 8); /* remove stackframe */
+ M_ALD(REG_RA, REG_SP, 0 * 8); /* load return address */
+ M_LDA(REG_SP, REG_SP, NATIVESTUBSTACK * 8); /* remove stackframe */
M_LDA(REG_ITMP2, REG_RA, -4); /* move fault address into reg. itmp2 */
M_ALD(REG_ITMP3, REG_PV, -3 * 8); /* load asm exception handler address */
M_JMP(REG_ZERO, REG_ITMP3); /* jump to asm exception handler */
count_nstub_len += NATIVESTUBSIZE * 8;
#endif
- return (u1*) (s + NATIVESTUBOFFSET);
+ return (u1 *) (s + NATIVESTUBOFFSET);
}
void removenativestub(u1 *stub)
{
- CFREE((u8*) stub - NATIVESTUBOFFSET, NATIVESTUBSIZE * 8);
+ CFREE((u8 *) stub - NATIVESTUBOFFSET, NATIVESTUBSIZE * 8);
}