@@ -1,7 +1,14 @@
+/*
+ * This provides an optimized implementation of memcpy, and a simplified
+ * implementation of memset and memmove. These are used here because the
+ * standard kernel runtime versions are not yet available and we don't
+ * trust the gcc built-in implementations as they may do unexpected things
+ * (e.g. FPU ops) in the minimal decompression stub execution environment.
+ */
 #include "../string.c"
 
 #ifdef CONFIG_X86_32
-void *__memcpy(void *dest, const void *src, size_t n)
+void *memcpy(void *dest, const void *src, size_t n)
 {
 	int d0, d1, d2;
 	asm volatile(
@@ -15,7 +22,7 @@ void *__memcpy(void *dest, const void *src, size_t n)
 	return dest;
 }
 #else
-void *__memcpy(void *dest, const void *src, size_t n)
+void *memcpy(void *dest, const void *src, size_t n)
 {
 	long d0, d1, d2;
 	asm volatile(
@@ -40,17 +47,13 @@ void *memset(void *s, int c, size_t n)
 	return s;
 }
 
-/*
- * This memcpy is overlap safe (i.e. it is memmove without conflicting
- * with other definitions of memmove from the various decompressors.
- */
-void *memcpy(void *dest, const void *src, size_t n)
+void *memmove(void *dest, const void *src, size_t n)
 {
 	unsigned char *d = dest;
 	const unsigned char *s = src;
 
 	if (d <= s || d - s >= n)
-		return __memcpy(dest, src, n);
+		return memcpy(dest, src, n);
 
 	while (n-- > 0)
 		d[n] = s[n];
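
A note on the overlap test in the new memmove() above: a plain forward
memcpy() is safe when dest starts at or before src, or when the two regions
do not overlap at all (d - s >= n); only an overlapping copy with d > s
needs the backward loop, which writes high addresses first so source bytes
are read before they are clobbered. Below is a minimal userspace sketch of
the same logic; the my_memmove() harness is hypothetical and not part of
this patch.

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical harness mirroring the overlap rule used above. */
	static void *my_memmove(void *dest, const void *src, size_t n)
	{
		unsigned char *d = dest;
		const unsigned char *s = src;

		/* Forward copy is safe: dest at/below src, or no overlap. */
		if (d <= s || (size_t)(d - s) >= n)
			return memcpy(dest, src, n);

		/* Overlapping with d > s: copy backwards. */
		while (n-- > 0)
			d[n] = s[n];

		return dest;
	}

	int main(void)
	{
		char buf[] = "abcdef";

		my_memmove(buf + 2, buf, 4);	/* overlapping shift right */
		printf("%s\n", buf);		/* prints "ababcd" */
		return 0;
	}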