// Misc utility functions.
//
-// Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net>
+// Copyright (C) 2008,2009 Kevin O'Connor <kevin@koconnor.net>
//
// This file may be distributed under the terms of the GNU LGPLv3 license.
asm volatile(
#if MODE16 == 1
"calll __call16\n"
+ "cli\n"
+ "cld"
#else
- "calll __call16_from32\n"
+ "calll __call16_from32"
#endif
: "+a" (callregs), "+m" (*callregs)
:
__force_link_error__call16big_only_in_32bit_mode();
asm volatile(
- "calll __call16big_from32\n"
+ "calll __call16big_from32"
: "+a" (callregs), "+m" (*callregs)
:
: "ebx", "ecx", "edx", "esi", "edi", "cc", "memory");
call16(callregs);
}
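+
+// Usage sketch (illustrative only, not part of this patch): callers
+// fill in a register state and pass its address down to call16().
+// The 'struct bregs' layout and the call16_int() helper are assumed
+// from the surrounding codebase:
+//
+//     struct bregs br;
+//     memset(&br, 0, sizeof(br));
+//     br.ah = 0x0e;   // int 0x10 teletype output
+//     br.al = 'X';
+//     call16_int(0x10, &br);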
-inline void
-call16_simpint(int nr, u32 *eax, u32 *flags)
-{
- extern void __force_link_error__call16_simpint_only_in_16bit_mode();
- if (!MODE16)
- __force_link_error__call16_simpint_only_in_16bit_mode();
-
- asm volatile(
- "stc\n"
- "int %2\n"
- "pushfl\n"
- "popl %1\n"
- "cld\n"
- "cli\n"
- : "+a"(*eax), "=r"(*flags)
- : "i"(nr)
- : "cc", "memory");
-}
-
// Switch to the extra stack in ebda and call a function.
inline u32
stack_hop(u32 eax, u32 edx, u32 ecx, void *func)
// Restore segments and stack
"movw %w3, %%ds\n"
"movw %w3, %%ss\n"
- "movl %4, %%esp\n"
+ "movl %4, %%esp"
: "+a" (eax), "+d" (edx), "+c" (ecx), "=&r" (bkup_ss), "=&r" (bkup_esp)
: "i" (EBDA_OFFSET_TOP_STACK), "r" (ebda_seg), "m" (*(u8*)func)
: "cc", "memory");
return checksum_far(GET_SEG(SS), buf, len);
}
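+// Return the length of the given string.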
+size_t
+strlen(const char *s)
+{
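+ // gcc can fold strlen() of a compile-time constant string itself.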
+ if (__builtin_constant_p(s))
+ return __builtin_strlen(s);
+ const char *p = s;
+ while (*p)
+ p++;
+ return p-s;
+}
+
+// Compare two areas of memory; returns -1, 0, or 1 rather than the
+// byte difference.
+int
+memcmp(const void *s1, const void *s2, size_t n)
+{
+ while (n) {
+ if (*(u8*)s1 != *(u8*)s2)
+ return *(u8*)s1 < *(u8*)s2 ? -1 : 1;
+ s1++;
+ s2++;
+ n--;
+ }
+ return 0;
+}
+
+// Compare two strings; returns -1, 0, or 1.
+int
+strcmp(const char *s1, const char *s2)
+{
+ for (;;) {
+ if (*s1 != *s2)
+ return *s1 < *s2 ? -1 : 1;
+ if (! *s1)
+ return 0;
+ s1++;
+ s2++;
+ }
+}
+
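+// Set 'len' bytes of memory at 'd_seg:d_far' to the byte value 'c'.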
+inline void
+memset_far(u16 d_seg, void *d_far, u8 c, size_t len)
+{
+ SET_SEG(ES, d_seg);
+ asm volatile(
+ "rep stosb %%es:(%%di)"
+ : "+c"(len), "+D"(d_far)
+ : "a"(c)
+ : "cc", "memory");
+}
+
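+// Fill memory at 'd_seg:d_far' with the 16bit value 'c'; 'len' is in
+// bytes, and any odd trailing byte is left unmodified.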
+inline void
+memset16_far(u16 d_seg, void *d_far, u16 c, size_t len)
+{
+ len /= 2;
+ SET_SEG(ES, d_seg);
+ asm volatile(
+ "rep stosw %%es:(%%di)"
+ : "+c"(len), "+D"(d_far)
+ : "a"(c)
+ : "cc", "memory");
+}
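+
+// Usage sketch (illustrative only, not part of this patch): clear an
+// 80x25 color text screen to grey-on-black spaces; 0xb800 is the
+// standard VGA text segment and 0x0720 the space/attribute pair:
+//
+//     memset16_far(0xb800, (void*)0, 0x0720, 80*25*2);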
+
void *
memset(void *s, int c, size_t n)
{
"movw %%ds, %w0\n"
"movw %w4, %%ds\n"
"rep movsb (%%si),%%es:(%%di)\n"
- "movw %w0, %%ds\n"
+ "movw %w0, %%ds"
: "=&r"(bkup_ds), "+c"(len), "+S"(s_far), "+D"(d_far)
: "r"(s_seg)
: "cc", "memory");
}
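+// 'memcpy' is #defined to __builtin_memcpy elsewhere (presumably in a
+// header); the macro must be suspended while defining the out-of-line
+// version here, and is restored for subsequent callers.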
void *
+#undef memcpy
memcpy(void *d1, const void *s1, size_t len)
+#define memcpy __builtin_memcpy
{
- u8 *d = (u8*)d1, *s = (u8*)s1;
- while (len--)
- *d++ = *s++;
+ void *d = d1;
+ if (((u32)d1 | (u32)s1 | len) & 3) {
+ // Misaligned source, destination, or length - copy byte by byte.
+ asm volatile(
+ "rep movsb (%%esi),%%es:(%%edi)"
+ : "+c"(len), "+S"(s1), "+D"(d)
+ : : "cc", "memory");
+ return d1;
+ }
+ // Common case - use 4-byte copy
+ len /= 4;
+ asm volatile(
+ "rep movsl (%%esi),%%es:(%%edi)"
+ : "+c"(len), "+S"(s1), "+D"(d)
+ : : "cc", "memory");
return d1;
}
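+
+// Design note: OR-ing 'd1', 's1', and 'len' folds three alignment
+// checks into one test - the 'rep movsl' path runs only when source,
+// destination, and length are all multiples of four, so the dword
+// copy never writes past the requested length.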