string_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;

	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
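
/*
 * Illustrative note, not part of the original header: the asm above copies
 * n / 4 dwords with "rep movsl", then tests bit 1 and bit 0 of the byte
 * count to mop up a trailing 16-bit word and a trailing byte.  A rough C
 * sketch of that tail handling, assuming d and s point just past the bulk
 * copy:
 *
 *	if (n & 2) {			// leftover 16-bit word
 *		*(u16 *)d = *(const u16 *)s;
 *		d += 2;
 *		s += 2;
 *	}
 *	if (n & 1)			// leftover byte
 *		*(u8 *)d = *(const u8 *)s;
 */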
/* Even with __builtin_ the compiler may decide to use the out of line
   function. */
#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
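
/*
 * Illustrative expansion, not part of the original header: on the old
 * compilers covered by the #if above, a constant-size copy of 64 bytes or
 * more is routed to the out-of-line __memcpy(), while everything else is
 * left to __builtin_memcpy(), which the compiler may inline:
 *
 *	memcpy(dst, src, 128);	// constant >= 64 -> __memcpy(dst, src, 128)
 *	memcpy(dst, src, 16);	// small constant -> __builtin_memcpy(...)
 *	memcpy(dst, src, n);	// variable size  -> __builtin_memcpy(...)
 */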
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
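
/*
 * Illustrative usage, not part of the original header: n counts 16-bit
 * elements, not bytes.  For example, painting one 640-pixel scanline of a
 * hypothetical RGB565 framebuffer with a single colour:
 *
 *	u16 *line = ...;		// hypothetical scanline pointer
 *	memset16(line, 0xf800, 640);	// 640 pixels of pure red
 */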
#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;

	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
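
/*
 * Illustrative note, not part of the original header: all three memsetNN
 * helpers are the same "rep stos" pattern at different widths: RAX holds
 * the fill value, RDI the destination, and RCX the element count.  In
 * plain C, memset64() behaves like:
 *
 *	uint64_t *p = s;
 *
 *	while (n--)
 *		*p++ = v;
 *	return s;
 */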
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem*
 * functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
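
/*
 * Illustrative note, not part of the original header: a file is usually
 * excluded from KASAN instrumentation by its Makefile, e.g. something
 * like
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * which builds that object without -fsanitize=kernel-address; the
 * compiler then leaves __SANITIZE_ADDRESS__ undefined and the redirects
 * above take effect.
 */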
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
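
/*
 * Illustrative usage, not part of the original header: a caller such as a
 * persistent-memory driver checks the return value to learn how much of
 * the copy survived a machine check (names below are hypothetical):
 *
 *	unsigned long rem = memcpy_mcsafe(buf, pmem_addr, len);
 *
 *	if (rem)		// rem trailing bytes were not copied
 *		return -EIO;
 */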
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
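
/*
 * Illustrative note, not part of the original header: "movnti" is a
 * non-temporal store that bypasses the CPU caches, so the constant 4-, 8-
 * and 16-byte cases above reach memory without a separate cache flush;
 * all other sizes fall back to the out-of-line __memcpy_flushcache().
 * A pmem-style call might look like (pointers hypothetical):
 *
 *	memcpy_flushcache(pmem_dst, buf, 8);
 */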
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_64_H */