2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
14 /* Boehm, November 17, 1995 12:13 pm PST */
18 # include "private/gc_priv.h"
21 # if defined(OS2) || defined(CX_UX)
22 # define _setjmp(b) setjmp(b)
23 # define _longjmp(b,v) longjmp(b,v)
29 # include <machine/reg.h>
33 #if defined(RS6000) || defined(POWERPC)
34 # include <ucontext.h>
37 #if defined(__MWERKS__) && !defined(POWERPC)
39 asm static void PushMacRegisters()
41 sub.w #4,sp // reserve space for one parameter.
48 # if !__option(a6frames)
49 // <pcb> perhaps a6 should be pushed if stack frames are not being used.
53 // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
66 add.w #4,sp // fix stack.
70 #endif /* __MWERKS__ */
72 # if defined(SPARC) || defined(IA64)
73 /* Value returned from register flushing routine; either sp (SPARC) */
74 /* or ar.bsp (IA64) */
75 word GC_save_regs_ret_val;
78 /* Routine to mark from registers that are preserved by the C compiler. */
79 /* This must be ported to every new architecture. There is a generic */
80 /* version at the end, that is likely, but not guaranteed to work */
81 /* on your architecture. Run the test_setjmp program to see whether */
82 /* there is any chance it will work. */
84 #if !defined(USE_GENERIC_PUSH_REGS) && !defined(USE_ASM_PUSH_REGS)
89 register long TMP_SP; /* must be bound to r11 */
93 /* VAX - generic code below does not work under 4.2 */
94 /* r1 through r5 are caller save, and therefore */
95 /* on the stack or dead. */
      /* Push each preserved register (r6-r11) as the single argument   */
      /* to GC_push_one; "calls $1,..." invokes it with one stacked     */
      /* argument (VAX calling sequence).                               */
      asm("pushl r11"); asm("calls $1,_GC_push_one");
      asm("pushl r10"); asm("calls $1,_GC_push_one");
      asm("pushl r9"); asm("calls $1,_GC_push_one");
      asm("pushl r8"); asm("calls $1,_GC_push_one");
      asm("pushl r7"); asm("calls $1,_GC_push_one");
      asm("pushl r6"); asm("calls $1,_GC_push_one");
# define HAVE_PUSH_REGS
104 # if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
105 /* M68K SUNOS - could be replaced by generic code */
106 /* a0, a1 and d1 are caller save */
107 /* and therefore are on stack or dead. */
109 asm("subqw #0x4,sp"); /* allocate word on top of stack */
      /* Store each preserved register into the word reserved on top   */
      /* of the stack, then call GC_push_one on it.                    */
      asm("movl a2,sp@"); asm("jbsr _GC_push_one");
      asm("movl a3,sp@"); asm("jbsr _GC_push_one");
      asm("movl a4,sp@"); asm("jbsr _GC_push_one");
      asm("movl a5,sp@"); asm("jbsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("movl d1,sp@"); asm("jbsr _GC_push_one");
      asm("movl d2,sp@"); asm("jbsr _GC_push_one");
      asm("movl d3,sp@"); asm("jbsr _GC_push_one");
      asm("movl d4,sp@"); asm("jbsr _GC_push_one");
      asm("movl d5,sp@"); asm("jbsr _GC_push_one");
      asm("movl d6,sp@"); asm("jbsr _GC_push_one");
      asm("movl d7,sp@"); asm("jbsr _GC_push_one");
124 asm("addqw #0x4,sp"); /* put stack back where it was */
125 # define HAVE_PUSH_REGS
128 # if defined(M68K) && defined(HP)
129 /* M68K HP - could be replaced by generic code */
130 /* a0, a1 and d1 are caller save. */
132 asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
      /* Same scheme as the SunOS variant above, in HP/Motorola        */
      /* assembler syntax: store each preserved register into the      */
      /* reserved stack word and call GC_push_one on it.               */
      asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("mov.l %d1,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
147 asm("addq.w &0x4,%sp"); /* put stack back where it was */
148 # define HAVE_PUSH_REGS
149 # endif /* M68K HP */
151 # if defined(M68K) && defined(AMIGA)
152 /* AMIGA - could be replaced by generic code */
153 /* a0, a1, d0 and d1 are caller save */
156 asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
      /* Amiga/gcc: push a2-a6 and d2-d7 (note a6 is included here,    */
      /* unlike the SunOS/HP variants) via the reserved stack word.    */
      asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
171 asm("addq.w &0x4,%sp"); /* put stack back where it was */
172 # define HAVE_PUSH_REGS
173 # else /* !__GNUC__ */
174 GC_push_one(getreg(REG_A2));
175 GC_push_one(getreg(REG_A3));
177 /* Can probably be changed to #if 0 -Kjetil M. (a4=globals)*/
178 GC_push_one(getreg(REG_A4));
      /* Non-gcc Amiga path: read each preserved register through      */
      /* getreg() and hand the value to GC_push_one directly -- no     */
      /* inline assembly needed.                                       */
      GC_push_one(getreg(REG_A5));
      GC_push_one(getreg(REG_A6));
      /* Skip stack pointer */
      GC_push_one(getreg(REG_D2));
      GC_push_one(getreg(REG_D3));
      GC_push_one(getreg(REG_D4));
      GC_push_one(getreg(REG_D5));
      GC_push_one(getreg(REG_D6));
      GC_push_one(getreg(REG_D7));
# define HAVE_PUSH_REGS
# endif /* !__GNUC__ */
193 # if defined(M68K) && defined(MACOS)
194 # if defined(THINK_C)
195 # define PushMacReg(reg) \
199 sub.w #4,sp ; reserve space for one parameter.
203 ; skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
210 add.w #4,sp ; fix stack.
212 # define HAVE_PUSH_REGS
214 # endif /* THINK_C */
215 # if defined(__MWERKS__)
217 # define HAVE_PUSH_REGS
218 # endif /* __MWERKS__ */
221 # if defined(I386) &&!defined(OS2) &&!defined(SVR4) \
222 && (defined(__MINGW32__) || !defined(MSWIN32)) \
223 && !defined(SCO) && !defined(SCO_ELF) \
224 && !(defined(LINUX) && defined(__ELF__)) \
225 && !(defined(FREEBSD) && defined(__ELF__)) \
226 && !(defined(NETBSD) && defined(__ELF__)) \
227 && !(defined(OPENBSD) && defined(__ELF__)) \
228 && !(defined(BEOS) && defined(__ELF__)) \
229 && !defined(DOS4GW) && !defined(HURD)
      /* I386 code, generic code does not appear to work */
      /* It does appear to work under OS2, and asms don't */
      /* This is used for some 386 UNIX variants and for CYGWIN32 */
      /* Push every general register that might hold a pointer        */
      /* (caller-save ones included) and call GC_push_one on each;    */
      /* the addl pops the argument again after the call.             */
      asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
# define HAVE_PUSH_REGS
243 # if ( defined(I386) && defined(LINUX) && defined(__ELF__) ) \
244 || ( defined(I386) && defined(FREEBSD) && defined(__ELF__) ) \
245 || ( defined(I386) && defined(NETBSD) && defined(__ELF__) ) \
246 || ( defined(I386) && defined(OPENBSD) && defined(__ELF__) ) \
247 || ( defined(I386) && defined(HURD) && defined(__ELF__) ) \
248 || ( defined(I386) && defined(DGUX) )
      /* This is modified for Linux with ELF (Note: _ELF_ only) */
      /* This section handles FreeBSD with ELF. */
      /* Eax is caller-save and dead here. Other caller-save */
      /* registers could also be skipped. We assume there are no */
      /* pointers in MMX registers, etc. */
      /* We combine instructions in a single asm to prevent gcc from */
      /* inserting code in the middle. */
      /* Note: no leading underscore on GC_push_one here, matching    */
      /* the ELF targets selected by the surrounding #if.             */
      asm("pushl %ecx; call GC_push_one; addl $4,%esp");
      asm("pushl %edx; call GC_push_one; addl $4,%esp");
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
# define HAVE_PUSH_REGS
# if ( defined(I386) && defined(BEOS) && defined(__ELF__) )
      /* As far as I can understand from */
      /* http://www.beunited.org/articles/jbq/nasm.shtml, */
      /* only ebp, esi, edi and ebx are not scratch. How MMX */
      /* etc. registers should be treated, I have no idea. */
      /* Accordingly, only the four non-scratch registers are pushed. */
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
# define HAVE_PUSH_REGS
278 # if defined(I386) && defined(MSWIN32) && !defined(__MINGW32__) \
279 && !defined(USE_GENERIC)
280 /* I386 code, Microsoft variant */
282 __asm call GC_push_one
285 __asm call GC_push_one
288 __asm call GC_push_one
291 __asm call GC_push_one
294 __asm call GC_push_one
297 __asm call GC_push_one
300 __asm call GC_push_one
302 # define HAVE_PUSH_REGS
# if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
      /* I386 code, SVR4 variant, generic code does not appear to work */
      /* Same push/call/pop pattern as above; note the symbol has no   */
      /* leading underscore on these targets.                          */
      asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
# define HAVE_PUSH_REGS
      /* Push r3-r7 ("tos" = top of stack) and call GC_push_one for    */
      /* each; the adjspb restores the stack pointer after the call.   */
      /* NOTE(review): looks like NS32000 assembly -- confirm target.  */
      asm ("movd r3, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r4, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r5, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r6, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
# define HAVE_PUSH_REGS
327 GC_save_regs_ret_val = GC_save_regs_in_stack();
328 # define HAVE_PUSH_REGS
332 GC_push_one(TMP_SP); /* GC_push_one from r11 */
334 asm("cas r11, r6, r0"); GC_push_one(TMP_SP); /* r6 */
335 asm("cas r11, r7, r0"); GC_push_one(TMP_SP); /* through */
336 asm("cas r11, r8, r0"); GC_push_one(TMP_SP); /* r10 */
337 asm("cas r11, r9, r0"); GC_push_one(TMP_SP);
338 asm("cas r11, r10, r0"); GC_push_one(TMP_SP);
340 asm("cas r11, r12, r0"); GC_push_one(TMP_SP); /* r12 */
341 asm("cas r11, r13, r0"); GC_push_one(TMP_SP); /* through */
342 asm("cas r11, r14, r0"); GC_push_one(TMP_SP); /* r15 */
343 asm("cas r11, r15, r0"); GC_push_one(TMP_SP);
344 # define HAVE_PUSH_REGS
347 # if defined(M68K) && defined(SYSV)
348 /* Once again similar to SUN and HP, though setjmp appears to work.
352 asm("subqw #0x4,%sp"); /* allocate word on top of stack */
      /* SYSV/gcc: same scheme as the SunOS variant, but with %-prefixed */
      /* register names and no leading underscore on GC_push_one.        */
      asm("movl %a2,%sp@"); asm("jbsr GC_push_one");
      asm("movl %a3,%sp@"); asm("jbsr GC_push_one");
      asm("movl %a4,%sp@"); asm("jbsr GC_push_one");
      asm("movl %a5,%sp@"); asm("jbsr GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("movl %d1,%sp@"); asm("jbsr GC_push_one");
      asm("movl %d2,%sp@"); asm("jbsr GC_push_one");
      asm("movl %d3,%sp@"); asm("jbsr GC_push_one");
      asm("movl %d4,%sp@"); asm("jbsr GC_push_one");
      asm("movl %d5,%sp@"); asm("jbsr GC_push_one");
      asm("movl %d6,%sp@"); asm("jbsr GC_push_one");
      asm("movl %d7,%sp@"); asm("jbsr GC_push_one");
367 asm("addqw #0x4,%sp"); /* put stack back where it was */
368 # define HAVE_PUSH_REGS
369 # else /* !__GNUC__*/
370 asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
      /* SYSV/non-gcc: Motorola-syntax equivalent of the block above.  */
      asm("mov.l %a2,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %a3,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %a4,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %a5,(%sp)"); asm("jsr GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("mov.l %d1,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %d2,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %d3,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %d4,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %d5,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %d6,(%sp)"); asm("jsr GC_push_one");
      asm("mov.l %d7,(%sp)"); asm("jsr GC_push_one");
385 asm("addq.w &0x4,%sp"); /* put stack back where it was */
386 # define HAVE_PUSH_REGS
387 # endif /* !__GNUC__ */
388 # endif /* M68K/SYSV */
392 register int * sp asm ("optop");
393 extern int *__libc_stack_end;
395 GC_push_all_stack (sp, __libc_stack_end);
396 # define HAVE_PUSH_REGS
397 /* Isn't this redundant with the code to push the stack? */
401 /* other machines... */
402 # if !defined(HAVE_PUSH_REGS)
403 --> We just generated an empty GC_push_regs, which
404 --> is almost certainly broken. Try defining
405 --> USE_GENERIC_PUSH_REGS instead.
408 #endif /* !USE_GENERIC_PUSH_REGS && !USE_ASM_PUSH_REGS */
410 void GC_with_callee_saves_pushed(fn, arg)
416 # if defined(USE_GENERIC_PUSH_REGS)
417 # ifdef HAVE_BUILTIN_UNWIND_INIT
418 /* This was suggested by Richard Henderson as the way to */
419 /* force callee-save registers and register windows onto */
421 __builtin_unwind_init();
422 # else /* !HAVE_BUILTIN_UNWIND_INIT */
423 # if defined(RS6000) || defined(POWERPC)
424 /* FIXME: RS6000 means AIX. */
425 /* This should probably be used in all Posix/non-gcc */
426 /* settings. We defer that change to minimize risk. */
431 /* The idea is due to Parag Patel at HP. */
432 /* We're not sure whether he would like */
/* to be acknowledged for it or not. */
435 register word * i = (word *) regs;
436 register ptr_t lim = (ptr_t)(regs) + (sizeof regs);
438 /* Setjmp doesn't always clear all of the buffer. */
439 /* That tends to preserve garbage. Clear it. */
440 for (; (char *)i < lim; i++) {
443 # if defined(MSWIN32) || defined(MSWINCE) \
444 || defined(UTS4) || defined(LINUX) || defined(EWS4800)
447 (void) _setjmp(regs);
448 /* We don't want to mess with signals. According to */
449 /* SUSV3, setjmp() may or may not save signal mask. */
450 /* _setjmp won't, but is less portable. */
452 # endif /* !AIX ... */
453 # endif /* !HAVE_BUILTIN_UNWIND_INIT */
455 # if defined(PTHREADS) && !defined(MSWIN32) /* !USE_GENERIC_PUSH_REGS */
456 /* We may still need this to save thread contexts. */
459 # else /* Shouldn't be needed */
460 ABORT("Unexpected call to GC_with_callee_saves_pushed");
463 # if (defined(SPARC) && !defined(HAVE_BUILTIN_UNWIND_INIT)) \
465 /* On a register window machine, we need to save register */
466 /* contents on the stack for this to work. The setjmp */
467 /* is probably not needed on SPARC, since pointers are */
468 /* only stored in windowed or scratch registers. It is */
469 /* needed on IA64, since some non-windowed registers are */
472 GC_save_regs_ret_val = GC_save_regs_in_stack();
/* On IA64 gcc, could use __builtin_ia64_bsp() and */
/* __builtin_ia64_flushrs(). The latter will be done */
/* implicitly by __builtin_unwind_init() for gcc3.0.1 */
480 /* Strongly discourage the compiler from treating the above */
481 /* as a tail-call, since that would pop the register */
482 /* contents before we get a chance to look at them. */
483 GC_noop1((word)(&dummy));
486 #if defined(USE_GENERIC_PUSH_REGS)
487 void GC_generic_push_regs(cold_gc_frame)
490 GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
492 #endif /* USE_GENERIC_PUSH_REGS */
494 /* On register window machines, we need a way to force registers into */
495 /* the stack. Return sp. */
497 __asm__(" .seg \"text\"");
498 # if defined(SVR4) || defined(NETBSD) || defined(FREEBSD)
499 __asm__(" .globl GC_save_regs_in_stack");
500 __asm__("GC_save_regs_in_stack:");
501 __asm__(" .type GC_save_regs_in_stack,#function");
503 __asm__(" .globl _GC_save_regs_in_stack");
504 __asm__("_GC_save_regs_in_stack:");
506 # if defined(__arch64__) || defined(__sparcv9)
507 __asm__(" save %sp,-128,%sp");
510 __asm__(" restore %sp,2047+128,%o0");
512 __asm__(" ta 0x3 ! ST_FLUSH_WINDOWS");
514 __asm__(" mov %sp,%o0");
517 __asm__(" .GC_save_regs_in_stack_end:");
518 __asm__(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
521 word GC_save_regs_in_stack() { return(0 /* sp really */);}
525 /* On IA64, we also need to flush register windows. But they end */
526 /* up on the other side of the stack segment. */
527 /* Returns the backing store pointer for the register stack. */
528 /* We now implement this as a separate assembly file, since inline */
529 /* assembly code here doesn't work with either the Intel or HP */
540 asm(" .global GC_save_regs_in_stack");
541 asm(" .proc GC_save_regs_in_stack");
542 asm("GC_save_regs_in_stack:");
546 asm(" mov r8=ar.bsp");
547 asm(" br.ret.sptk.few rp");
548 asm(" .endp GC_save_regs_in_stack");
550 # if 0 /* Other alternatives that don't work on HP/UX */
551 word GC_save_regs_in_stack() {
553 __builtin_ia64_flushrs();
554 return __builtin_ia64_bsp();
559 _asm(" mov r8=ar.bsp");
560 _asm(" br.ret.sptk.few rp");
564 asm(" mov r8=ar.bsp");
565 asm(" br.ret.sptk.few rp");
572 /* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
573 /* returns arg. Stack clearing is crucial on SPARC, so we supply */
574 /* an assembly version that's more careful. Assumes limit is hotter */
575 /* than sp, and limit is 8 byte aligned. */
576 #if defined(ASM_CLEAR_CODE)
581 __asm__(".globl _GC_clear_stack_inner");
582 __asm__("_GC_clear_stack_inner:");
584 __asm__(".globl GC_clear_stack_inner");
585 __asm__("GC_clear_stack_inner:");
/* The .type directive must describe the symbol defined on the line   */
/* above, GC_clear_stack_inner.  It previously named                  */
/* GC_save_regs_in_stack -- an apparent copy/paste from the earlier   */
/* GC_save_regs_in_stack block -- which mislabels the ELF symbol      */
/* type information for this function.                                */
__asm__(".type GC_clear_stack_inner,#function");
588 #if defined(__arch64__) || defined(__sparcv9)
589 __asm__("mov %sp,%o2"); /* Save sp */
590 __asm__("add %sp,2047-8,%o3"); /* p = sp+bias-8 */
591 __asm__("add %o1,-2047-192,%sp"); /* Move sp out of the way, */
592 /* so that traps still work. */
593 /* Includes some extra words */
594 /* so we can be sloppy below. */
596 __asm__("stx %g0,[%o3]"); /* *(long *)p = 0 */
597 __asm__("cmp %o3,%o1");
598 __asm__("bgu,pt %xcc, loop"); /* if (p > limit) goto loop */
599 __asm__("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
601 __asm__("mov %o2,%sp"); /* Restore sp., delay slot */
603 __asm__("mov %sp,%o2"); /* Save sp */
604 __asm__("add %sp,-8,%o3"); /* p = sp-8 */
605 __asm__("clr %g1"); /* [g0,g1] = 0 */
606 __asm__("add %o1,-0x60,%sp"); /* Move sp out of the way, */
607 /* so that traps still work. */
608 /* Includes some extra words */
609 /* so we can be sloppy below. */
611 __asm__("std %g0,[%o3]"); /* *(long long *)p = 0 */
612 __asm__("cmp %o3,%o1");
613 __asm__("bgu loop "); /* if (p > limit) goto loop */
614 __asm__("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
616 __asm__("mov %o2,%sp"); /* Restore sp., delay slot */
617 #endif /* old SPARC */
618 /* First argument = %o0 = return value */
620 __asm__(" .GC_clear_stack_inner_end:");
621 __asm__(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
626 ptr_t GC_clear_stack_inner(arg, limit)
627 ptr_t arg; word limit;