 inline void
 call16big(struct bregs *callregs)
 {
-    extern void __force_link_error__call16big_only_in_32bit_mode();
-    if (MODE16)
-        __force_link_error__call16big_only_in_32bit_mode();
-
+    ASSERT32();
     asm volatile(
         "calll __call16big_from32"
         : "+a" (callregs), "+m" (*callregs)
         ...
 __call16_int(struct bregs *callregs, u16 offset)
 {
     if (MODE16)
-        callregs->cs = GET_SEG(CS);
+        callregs->code.seg = GET_SEG(CS);
     else
-        callregs->cs = SEG_BIOS;
-    callregs->ip = offset;
+        callregs->code.seg = SEG_BIOS;
+    callregs->code.offset = offset;
     call16(callregs);
 }
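
This hunk belongs to a wider cleanup that folds the separate cs/ip members
of struct bregs into one segment:offset pair. An inferred layout, judging
only from the accesses above (the type name segoff_s and everything beyond
code.seg/code.offset is an assumption, not copied from the real headers):

    // Assumed shape of the combined segment:offset pair.
    struct segoff_s {
        u16 offset;
        u16 seg;
    };

    struct bregs {
        // ... general-purpose registers, flags, etc. ...
        struct segoff_s code;   // replaces the old "u16 ip, cs;" fields
    };
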
 inline u32
 stack_hop(u32 eax, u32 edx, u32 ecx, void *func)
 {
-    extern void __force_link_error__stack_hop_only_in_16bit_mode();
-    if (!MODE16)
-        __force_link_error__stack_hop_only_in_16bit_mode();
-
+    ASSERT16();
     u16 ebda_seg = get_ebda_seg(), bkup_ss;
     u32 bkup_esp;
     asm volatile(
         ...
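
The elided asm is what gives stack_hop() its name: the locals shown
(bkup_ss, bkup_esp, ebda_seg) suggest it backs up the current ss:esp,
switches to a scratch stack in the EBDA, calls func with the three register
arguments, and then restores the original stack. A hypothetical caller, to
show the intended contract (do_op and its body are invented for
illustration):

    // Hypothetical callee: work that needs more stack than the 16bit
    // caller's own (possibly tiny) stack can safely provide.
    static u32 do_op(u32 eax, u32 edx, u32 ecx)
    {
        // ... real work here ...
        return 0;
    }

    // Runs do_op(eax, edx, ecx) on the EBDA stack and returns its result.
    u32 status = stack_hop(eax, edx, ecx, do_op);
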
 void *
 #undef memcpy
 memcpy(void *d1, const void *s1, size_t len)
+#if MODE16 == 0
 #define memcpy __builtin_memcpy
+#endif
 {
+    SET_SEG(ES, GET_SEG(SS));
     void *d = d1;
     if (((u32)d1 | (u32)s1 | len) & 3) {
         // non-aligned memcpy
         ...
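
Two things happen in this last hunk. The #undef/#define sandwich around the
definition lets the real memcpy() function be compiled even though the
headers normally alias memcpy to __builtin_memcpy; the new #if MODE16 == 0
guard re-enables that alias only for 32bit code, so 16bit callers always
reach this out-of-line function. That matters because of the added SET_SEG
line: the x86 string-move instructions write through %es:(%di), and copying
SS into ES implies the pointers here are addressed relative to the stack
segment, so ES must match before the copy. A sketch of the copy this sets
up, assuming the elided body uses rep movs as the alignment check suggests:

    // Sketch under the stated assumption; the actual elided body may
    // differ.  rep movsb reads from %ds:(%si) and writes to %es:(%di),
    // which is why ES was loaded from SS above.
    asm volatile(
        "rep movsb (%%esi),%%es:(%%edi)"
        : "+c" (len), "+S" (s1), "+D" (d)
        : : "cc", "memory");
    return d1;
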