@@ -102,9 +102,9 @@ __xtensa_copy_user:
 	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte
 
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 	addi	a4, a4, -1
 	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
@@ -112,11 +112,11 @@ __xtensa_copy_user:
 .Ldst2mod4:	# dst 16-bit aligned
 	# copy 2 bytes
 	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, s_fixup)
-	EX(s8i, a7, a5, 1, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
+	EX(s8i, a7, a5, 1, fixup)
 	addi	a5, a5, 2
 	addi	a4, a4, -2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
@@ -135,9 +135,9 @@ __xtensa_copy_user:
 	add	a7, a3, a4	# a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, .Lnextbyte
@@ -161,15 +161,15 @@ __xtensa_copy_user:
 	add	a8, a8, a3	# a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(l32i, a6, a3, 0, l_fixup)
-	EX(l32i, a7, a3, 4, l_fixup)
-	EX(s32i, a6, a5, 0, s_fixup)
-	EX(l32i, a6, a3, 8, l_fixup)
-	EX(s32i, a7, a5, 4, s_fixup)
-	EX(l32i, a7, a3, 12, l_fixup)
-	EX(s32i, a6, a5, 8, s_fixup)
+	EX(l32i, a6, a3, 0, fixup)
+	EX(l32i, a7, a3, 4, fixup)
+	EX(s32i, a6, a5, 0, fixup)
+	EX(l32i, a6, a3, 8, fixup)
+	EX(s32i, a7, a5, 4, fixup)
+	EX(l32i, a7, a3, 12, fixup)
+	EX(s32i, a6, a5, 8, fixup)
 	addi	a3, a3, 16
-	EX(s32i, a7, a5, 12, s_fixup)
+	EX(s32i, a7, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a8, .Loop1
@@ -177,31 +177,31 @@ __xtensa_copy_user:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# copy 8 bytes
-	EX(l32i, a6, a3, 0, l_fixup)
-	EX(l32i, a7, a3, 4, l_fixup)
+	EX(l32i, a6, a3, 0, fixup)
+	EX(l32i, a7, a3, 4, fixup)
 	addi	a3, a3, 8
-	EX(s32i, a6, a5, 0, s_fixup)
-	EX(s32i, a7, a5, 4, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
+	EX(s32i, a7, a5, 4, fixup)
 	addi	a5, a5, 8
 .L2:
 	bbci.l	a4, 2, .L3
 	# copy 4 bytes
-	EX(l32i, a6, a3, 0, l_fixup)
+	EX(l32i, a6, a3, 0, fixup)
 	addi	a3, a3, 4
-	EX(s32i, a6, a5, 0, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
 	addi	a5, a5, 4
 .L3:
 	bbci.l	a4, 1, .L4
 	# copy 2 bytes
-	EX(l16ui, a6, a3, 0, l_fixup)
+	EX(l16ui, a6, a3, 0, fixup)
 	addi	a3, a3, 2
-	EX(s16i, a6, a5, 0, s_fixup)
+	EX(s16i, a6, a5, 0, fixup)
 	addi	a5, a5, 2
 .L4:
 	bbci.l	a4, 0, .L5
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(s8i, a6, a5, 0, fixup)
 .L5:
 	movi	a2, 0	# return success for len bytes copied
 	retw
@@ -217,7 +217,7 @@ __xtensa_copy_user:
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	and	a10, a3, a8	# save unalignment offset for below
 	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
-	EX(l32i, a6, a3, 0, l_fixup)	# load first word
+	EX(l32i, a6, a3, 0, fixup)	# load first word
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +226,19 @@ __xtensa_copy_user:
 	add	a12, a12, a3	# a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-	EX(l32i, a7, a3, 4, l_fixup)
-	EX(l32i, a8, a3, 8, l_fixup)
+	EX(l32i, a7, a3, 4, fixup)
+	EX(l32i, a8, a3, 8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, s_fixup)
-	EX(l32i, a9, a3, 12, l_fixup)
+	EX(s32i, a6, a5, 0, fixup)
+	EX(l32i, a9, a3, 12, fixup)
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5, 4, s_fixup)
-	EX(l32i, a6, a3, 16, l_fixup)
+	EX(s32i, a7, a5, 4, fixup)
+	EX(l32i, a6, a3, 16, fixup)
 	ALIGN(	a8, a8, a9)
-	EX(s32i, a8, a5, 8, s_fixup)
+	EX(s32i, a8, a5, 8, fixup)
 	addi	a3, a3, 16
 	ALIGN(	a9, a9, a6)
-	EX(s32i, a9, a5, 12, s_fixup)
+	EX(s32i, a9, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a12, .Loop2
@@ -246,39 +246,39 @@ __xtensa_copy_user:
 .Loop2done:
 	bbci.l	a4, 3, .L12
 	# copy 8 bytes
-	EX(l32i, a7, a3, 4, l_fixup)
-	EX(l32i, a8, a3, 8, l_fixup)
+	EX(l32i, a7, a3, 4, fixup)
+	EX(l32i, a8, a3, 8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
 	addi	a3, a3, 8
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5, 4, s_fixup)
+	EX(s32i, a7, a5, 4, fixup)
 	addi	a5, a5, 8
 	mov	a6, a8
 .L12:
 	bbci.l	a4, 2, .L13
 	# copy 4 bytes
-	EX(l32i, a7, a3, 4, l_fixup)
+	EX(l32i, a7, a3, 4, fixup)
 	addi	a3, a3, 4
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
 	addi	a5, a5, 4
 	mov	a6, a7
 .L13:
 	add	a3, a3, a10	# readjust a3 with correct misalignment
 	bbci.l	a4, 1, .L14
 	# copy 2 bytes
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, s_fixup)
-	EX(s8i, a7, a5, 1, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
+	EX(s8i, a7, a5, 1, fixup)
 	addi	a5, a5, 2
 .L14:
 	bbci.l	a4, 0, .L15
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(s8i, a6, a5, 0, fixup)
 .L15:
 	movi	a2, 0	# return success for len bytes copied
 	retw
@@ -291,30 +291,10 @@ __xtensa_copy_user:
  * bytes_copied = a5 - a2
  * retval = bytes_not_copied = original len - bytes_copied
  * retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes. This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
  */
 
-.Lmemset:
-	.word	memset
 
-s_fixup:
+fixup:
 	sub	a2, a5, a2	/* a2 <-- bytes copied */
 	sub	a2, a11, a2	/* a2 <-- bytes not copied */
 	retw
-
-l_fixup:
-	sub	a2, a5, a2	/* a2 <-- bytes copied */
-	sub	a2, a11, a2	/* a2 <-- bytes not copied == return value */
-
-	/* void *memset(void *s, int c, size_t n); */
-	mov	a6, a5		/* s */
-	movi	a7, 0		/* c */
-	mov	a8, a2		/* n */
-	l32r	a4, .Lmemset
-	callx4	a4
-	/* Ignore memset return value in a6. */
-	/* a2 still contains bytes not copied. */
-	retw
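
The unified fixup handler keeps only the return-value arithmetic described in
the comment block above: a2 still holds the original destination on entry, a5
is the destination cursor at the time of the fault, and a11 is the original
length saved at function entry. As a minimal illustrative sketch (not kernel
code; the function name and C types here are hypothetical), the same
arithmetic in C:

	#include <stddef.h>

	/*
	 * Model of the fixup arithmetic only.
	 * dst     ~ a2 on entry (original destination)
	 * cur_dst ~ a5 at fault time (destination cursor)
	 * len     ~ a11 (original length)
	 */
	static size_t fixup_retval(const char *dst, const char *cur_dst,
				   size_t len)
	{
		size_t bytes_copied = (size_t)(cur_dst - dst);	/* a5 - a2 */
		return len - bytes_copied;			/* a11 - (a5 - a2) */
	}

Since the copy loops advance a5 only after a chunk's stores have completed, a
fault mid-chunk can only under-count bytes_copied, so the returned not-copied
count is a safe overestimate for the copy_{to,from}_user contract.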