#include "util.h" // call16
#include "bregs.h" // struct bregs
-#include "farptr.h" // GET_FLATPTR
-#include "biosvar.h" // get_ebda_seg
-
-static inline u32 getesp() {
- u32 esp;
- asm("movl %%esp, %0" : "=rm"(esp));
- return esp;
-}
+#include "config.h" // BUILD_STACK_ADDR
/****************************************************************
inline void
call16(struct bregs *callregs)
{
- if (!MODE16 && getesp() > BUILD_STACK_ADDR)
+ if (!MODESEGMENT && getesp() > BUILD_STACK_ADDR)
panic("call16 with invalid stack\n");
asm volatile(
#if MODE16 == 1
inline void
call16big(struct bregs *callregs)
{
- ASSERT32();
+ ASSERT32FLAT();
if (getesp() > BUILD_STACK_ADDR)
panic("call16 with invalid stack\n");
asm volatile(
inline void
__call16_int(struct bregs *callregs, u16 offset)
{
- if (MODE16)
+ if (MODESEGMENT)
callregs->code.seg = GET_SEG(CS);
else
callregs->code.seg = SEG_BIOS;
call16(callregs);
}
-// Switch to the extra stack in ebda and call a function.
-inline u32
-stack_hop(u32 eax, u32 edx, u32 ecx, void *func)
-{
- ASSERT16();
- u16 ebda_seg = get_ebda_seg(), bkup_ss;
- u32 bkup_esp;
- asm volatile(
- // Backup current %ss/%esp values.
- "movw %%ss, %w3\n"
- "movl %%esp, %4\n"
- // Copy ebda seg to %ds/%ss and set %esp
- "movw %w6, %%ds\n"
- "movw %w6, %%ss\n"
- "movl %5, %%esp\n"
- // Call func
- "calll %7\n"
- // Restore segments and stack
- "movw %w3, %%ds\n"
- "movw %w3, %%ss\n"
- "movl %4, %%esp"
- : "+a" (eax), "+d" (edx), "+c" (ecx), "=&r" (bkup_ss), "=&r" (bkup_esp)
- : "i" (EBDA_OFFSET_TOP_STACK), "r" (ebda_seg), "m" (*(u8*)func)
- : "cc", "memory");
- return eax;
-}
-
-// 16bit trampoline for enabling irqs from 32bit mode.
-ASM16(
- " .global trampoline_checkirqs\n"
- "trampoline_checkirqs:\n"
- " rep ; nop\n"
- " lretw"
- );
-
-static void
-check_irqs32()
-{
- extern void trampoline_checkirqs();
- struct bregs br;
- br.flags = F_IF;
- br.code.seg = SEG_BIOS;
- br.code.offset = (u32)&trampoline_checkirqs;
- call16big(&br);
-}
-
-static void
-check_irqs16()
-{
- asm volatile(
- "sti\n"
- "nop\n"
- "rep ; nop\n"
- "cli\n"
- "cld\n"
- : : :"memory");
-}
-
-
-/****************************************************************
- * Threads
- ****************************************************************/
-
-#define THREADSTACKSIZE 4096
-
-struct thread_info {
- struct thread_info *next;
- void *stackpos;
-};
-
-struct thread_info MainThread;
-
-void
-thread_setup()
-{
- MainThread.next = &MainThread;
- MainThread.stackpos = NULL;
-}
-
-struct thread_info *
-getCurThread()
-{
- u32 esp = getesp();
- if (esp <= BUILD_STACK_ADDR)
- return &MainThread;
- return (void*)ALIGN_DOWN(esp, THREADSTACKSIZE);
-}
-
-// Briefly permit irqs to occur.
-void
-yield()
-{
- if (MODE16) {
- // In 16bit mode, just directly check irqs.
- check_irqs16();
- return;
- }
- if (! CONFIG_THREADS) {
- check_irqs32();
- return;
- }
- struct thread_info *cur = getCurThread();
- if (cur == &MainThread)
- // Permit irqs to fire
- check_irqs32();
-
- // Switch to the next thread
- struct thread_info *next = cur->next;
- asm volatile(
- " pushl $1f\n" // store return pc
- " pushl %%ebp\n" // backup %ebp
- " movl %%esp, 4(%%eax)\n" // cur->stackpos = %esp
- " movl 4(%%ecx), %%esp\n" // %esp = next->stackpos
- " popl %%ebp\n" // restore %ebp
- " retl\n" // restore pc
- "1:\n"
- : "+a"(cur), "+c"(next)
- :
- : "ebx", "edx", "esi", "edi", "cc", "memory");
-}
-
-// Last thing called from a thread (called on "next" stack).
-static void
-__end_thread(struct thread_info *old)
-{
- struct thread_info *pos = &MainThread;
- while (pos->next != old)
- pos = pos->next;
- pos->next = old->next;
- free(old);
- dprintf(DEBUG_thread, "\\%08x/ End thread\n", (u32)old);
-}
-
-void
-run_thread(void (*func)(void*), void *data)
-{
- ASSERT32();
- if (! CONFIG_THREADS)
- goto fail;
- struct thread_info *thread;
- thread = memalign_tmphigh(THREADSTACKSIZE, THREADSTACKSIZE);
- if (!thread)
- goto fail;
-
- thread->stackpos = (void*)thread + THREADSTACKSIZE;
- struct thread_info *cur = getCurThread();
- thread->next = cur->next;
- cur->next = thread;
-
- dprintf(DEBUG_thread, "/%08x\\ Start thread\n", (u32)thread);
- asm volatile(
- // Start thread
- " pushl $1f\n" // store return pc
- " pushl %%ebp\n" // backup %ebp
- " movl %%esp, 4(%%edx)\n" // cur->stackpos = %esp
- " movl 4(%%ebx), %%esp\n" // %esp = thread->stackpos
- " calll *%%ecx\n" // Call func
-
- // End thread
- " movl (%%ebx), %%ecx\n" // %ecx = thread->next
- " movl 4(%%ecx), %%esp\n" // %esp = next->stackpos
- " movl %%ebx, %%eax\n"
- " calll %4\n" // call __end_thread(thread)
- " popl %%ebp\n" // restore %ebp
- " retl\n" // restore pc
- "1:\n"
- : "+a"(data), "+c"(func), "+b"(thread), "+d"(cur)
- : "m"(*(u8*)__end_thread)
- : "esi", "edi", "cc", "memory");
- return;
-
-fail:
- func(data);
-}
-
-void
-wait_threads()
-{
- ASSERT32();
- if (! CONFIG_THREADS)
- return;
- while (MainThread.next != &MainThread)
- yield();
-}
-
/****************************************************************
* String ops
return s;
}
+void memset_fl(void *ptr, u8 val, size_t size)
+{
+ if (MODESEGMENT)
+ memset_far(FLATPTR_TO_SEG(ptr), (void*)(FLATPTR_TO_OFFSET(ptr)),
+ val, size);
+ else
+ memset(ptr, val, size);
+}
+
inline void
memcpy_far(u16 d_seg, void *d_far, u16 s_seg, const void *s_far, size_t len)
{
: "cc", "memory");
}
+inline void
+memcpy_fl(void *d_fl, const void *s_fl, size_t len)
+{
+ if (MODESEGMENT)
+ memcpy_far(FLATPTR_TO_SEG(d_fl), (void*)FLATPTR_TO_OFFSET(d_fl)
+ , FLATPTR_TO_SEG(s_fl), (void*)FLATPTR_TO_OFFSET(s_fl)
+ , len);
+ else
+ memcpy(d_fl, s_fl, len);
+}
+
void *
#undef memcpy
memcpy(void *d1, const void *s1, size_t len)
-#if MODE16 == 0
+#if MODESEGMENT == 0
#define memcpy __builtin_memcpy
#endif
{
return d1;
}
-// Copy from memory mapped IO. IO mem is very slow, so yield
-// periodically. 'len' must be 4 byte aligned.
+// Copy to/from memory mapped IO. IO mem is very slow, so yield
+// periodically.
void
iomemcpy(void *d, const void *s, u32 len)
{
yield();
- while (len) {
+ while (len > 3) {
u32 copylen = len;
- if (copylen > 1024)
- copylen = 1024;
- len -= copylen;
+ if (copylen > 2048)
+ copylen = 2048;
copylen /= 4;
+ len -= copylen * 4;
asm volatile(
"rep movsl (%%esi),%%es:(%%edi)"
: "+c"(copylen), "+S"(s), "+D"(d)
: : "cc", "memory");
yield();
}
+ if (len)
+ // Copy any remaining bytes.
+ memcpy(d, s, len);
}
void *
strtcpy(char *dest, const char *src, size_t len)
{
char *d = dest;
- while (len-- && *src != '\0')
+ while (--len && *src != '\0')
*d++ = *src++;
*d = '\0';
return dest;
}
+// Locate the first occurrence of character 'c' in the string 's'.
+char *
+strchr(const char *s, int c)
+{
+ for (; *s; s++)
+ if (*s == c)
+ return (char*)s;
+ return NULL;
+}
-/****************************************************************
- * Keyboard calls
- ****************************************************************/
-
-// Wait for 'usec' microseconds using (with irqs enabled) using int 1586.
+// Remove trailing whitespace (any char <= ' ': spaces, tabs, newlines, CRs).
void
-biosusleep(u32 usec)
+nullTrailingSpace(char *buf)
{
- struct bregs br;
- memset(&br, 0, sizeof(br));
- br.flags = F_IF;
- br.ah = 0x86;
- br.cx = usec >> 16;
- br.dx = usec;
- call16_int(0x15, &br);
+ int len = strlen(buf);
+ char *end = &buf[len-1];
+ while (end >= buf && *end <= ' ')
+ *(end--) = '\0';
}
+/****************************************************************
+ * Keyboard calls
+ ****************************************************************/
+
// See if a keystroke is pending in the keyboard buffer.
static int
-check_for_keystroke()
+check_for_keystroke(void)
{
struct bregs br;
memset(&br, 0, sizeof(br));
// Return a keystroke - waiting forever if necessary.
static int
-get_raw_keystroke()
+get_raw_keystroke(void)
{
struct bregs br;
memset(&br, 0, sizeof(br));
int
get_keystroke(int msec)
{
+ u32 end = calc_future_timer(msec);
for (;;) {
if (check_for_keystroke())
return get_raw_keystroke();
- if (msec <= 0)
+ if (check_timer(end))
return -1;
- biosusleep(50*1000);
- msec -= 50;
+ wait_irq();
}
}