+#include "bregs.h" // CR0_PE
+
+// Thread info - stored at bottom of each thread stack - don't change
+// without also updating the inline assembler below.
+struct thread_info {
+ struct thread_info *next; // next thread in the circular run list
+ void *stackpos; // thread's stack location; presumably saved/restored on switch — confirm in the thread-switch asm
+ struct thread_info **pprev; // address of predecessor's 'next' field (circular doubly-linked list)
+};
+// The initial/main thread. The list is circular, so a list containing
+// only MainThread (next pointing at itself) means "no other threads".
+struct thread_info VAR32FLATVISIBLE MainThread = {
+ &MainThread, NULL, &MainThread.next
+};
+
+
+/****************************************************************
+ * Low level helpers
+ ****************************************************************/
+
+// Store the CPU's current GDT register (base/limit pair) into *desc.
+static inline void sgdt(struct descloc_s *desc) {
+ asm("sgdtl %0" : "=m"(*desc));
+}
+// Load *desc into the CPU's GDT register. The "memory" clobber orders
+// the load against any prior C-level writes to the descriptor/table.
+static inline void lgdt(struct descloc_s *desc) {
+ asm("lgdtl %0" : : "m"(*desc) : "memory");
+}
+
+// Call a 32bit SeaBIOS function from a 16bit SeaBIOS function.
+u32 VISIBLE16
+call32(void *func, u32 eax, u32 errret)
+{
+ ASSERT16();
+ // The transition stubs require starting from real mode; if the CPU
+ // is already in (16bit) protected mode, bail out with the
+ // caller-supplied error value.
+ u32 cr0 = getcr0();
+ if (cr0 & CR0_PE)
+ // Called in 16bit protected mode?!
+ return errret;
+
+ // Backup cmos index register and disable nmi
+ u8 cmosindex = inb(PORT_CMOS_INDEX);
+ outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
+ inb(PORT_CMOS_DATA); // dummy read of data port after changing the index
+
+ // Backup fs/gs and gdt
+ u16 fs = GET_SEG(FS), gs = GET_SEG(GS);
+ struct descloc_s gdt;
+ sgdt(&gdt);
+
+ // eax is tied to the "+a" constraint below: it carries the argument
+ // into func and func's return value back out.
+ u32 bkup_ss, bkup_esp;
+ asm volatile(
+ // Backup ss/esp / set esp to flat stack location
+ // (flat address = ss<<4 + esp; %0 ends up holding the
+ // unshifted ss value, %1 the original esp)
+ " movl %%ss, %0\n"
+ " movl %%esp, %1\n"
+ " shll $4, %0\n"
+ " addl %0, %%esp\n"
+ " shrl $4, %0\n"
+
+ // Transition to 32bit mode, call func, return to 16bit.
+ // %edx carries the address the transition stub jumps to; the
+ // 32bit label 1f must be given as a flat address, hence the
+ // BUILD_BIOS_ADDR offset.
+ " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%edx\n"
+ " jmp transition32\n"
+ " .code32\n"
+ "1:calll *%3\n"
+ " movl $2f, %%edx\n"
+ " jmp transition16big\n"
+
+ // Restore ds/ss/esp
+ " .code16gcc\n"
+ "2:movl %0, %%ds\n"
+ " movl %0, %%ss\n"
+ " movl %1, %%esp\n"
+ : "=&r" (bkup_ss), "=&r" (bkup_esp), "+a" (eax)
+ : "r" (func)
+ : "ecx", "edx", "cc", "memory");
+
+ // Restore gdt and fs/gs
+ lgdt(&gdt);
+ SET_SEG(FS, fs);
+ SET_SEG(GS, gs);
+
+ // Restore cmos index register (re-enables nmi via original index bits)
+ outb(cmosindex, PORT_CMOS_INDEX);
+ inb(PORT_CMOS_DATA);
+ return eax;
+}
+
+// 16bit trampoline for enabling irqs from 32bit mode.
+ASM16(
+ // Runs with irqs enabled (check_irqs passes F_IF in bregs.flags):
+ // "rep ; nop" (the pause instruction) gives pending interrupts a
+ // window to be serviced, then far-return back to the caller.
+ " .global trampoline_checkirqs\n"
+ "trampoline_checkirqs:\n"
+ " rep ; nop\n"
+ " lretw"
+ );
+
+// Briefly permit hardware interrupts to run, from either CPU mode.
+static void
+check_irqs(void)
+{
+ if (MODESEGMENT) {
+ // Already in 16bit mode - enable irqs directly for a moment.
+ // "rep ; nop" is the pause instruction; cld leaves the
+ // direction flag cleared as the rest of the code expects.
+ asm volatile(
+ "sti\n"
+ "nop\n"
+ "rep ; nop\n"
+ "cli\n"
+ "cld\n"
+ : : :"memory");
+ return;
+ }
+ // In 32bit mode - bounce through the 16bit trampoline with
+ // interrupts enabled (F_IF) so pending irqs can be serviced.
+ extern void trampoline_checkirqs();
+ struct bregs br;
+ // NOTE(review): only flags/code are initialized here - assumes
+ // call16big ignores the remaining bregs fields; confirm against
+ // call16big's implementation.
+ br.flags = F_IF;
+ br.code.seg = SEG_BIOS;
+ br.code.offset = (u32)&trampoline_checkirqs;
+ call16big(&br);
+}
+
+// 16bit trampoline for waiting for an irq from 32bit mode.
+ASM16(
+ // Enable irqs then halt until the next interrupt wakes the CPU;
+ // afterwards far-return (lretw) back to the caller.
+ " .global trampoline_waitirq\n"
+ "trampoline_waitirq:\n"
+ " sti\n"
+ " hlt\n"
+ " lretw"
+ );
+
+// Wait for next irq to occur.
+void
+wait_irq(void)
+{
+ if (MODESEGMENT) {
+ asm volatile("sti ; hlt ; cli ; cld": : :"memory");
+ return;
+ }
+ if (CONFIG_THREADS && MainThread.next != &MainThread) {
+ // Threads still active - do a yield instead.
+ yield();
+ return;
+ }
+ extern void trampoline_waitirq();
+ struct bregs br;
+ br.flags = 0;
+ br.code.seg = SEG_BIOS;
+ br.code.offset = (u32)&trampoline_waitirq;
+ call16big(&br);
+}