  1. /*
  2. * Copyright(c) 2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
#ifndef __PMEM_H__
#define __PMEM_H__

#include <linux/io.h>
#include <linux/uio.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/* The arch pmem API writes through the cache, so a writeback map is safe */
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
#include <asm/pmem.h>
#else
/* No arch support: fall back to a write-through mapping for pmem */
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
/*
 * These are simply here to enable compilation, all call sites gate
 * calling these symbols with arch_has_pmem_api() and redirect to the
 * implementation in asm/pmem.h.
 */
/* Fallback stub: without CONFIG_ARCH_HAS_PMEM_API there is no durable wmb */
static inline bool __arch_has_wmb_pmem(void)
{
	return false;
}
/* Must never run: call sites gate on arch_has_pmem_api() (see comment above) */
static inline void arch_wmb_pmem(void)
{
	BUG();
}
/* Must never run: call sites gate on arch_has_pmem_api() (see comment above) */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	BUG();
}
/* Must never run: call sites gate on arch_has_pmem_api() (see comment above) */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	BUG();
	/* unreachable; returned count only exists to satisfy the prototype */
	return 0;
}
/* Must never run: call sites gate on arch_has_pmem_api() (see comment above) */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	BUG();
}
#endif
/*
 * Architectures that define ARCH_HAS_PMEM_API must provide
 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
 * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
 */

/*
 * memcpy_from_pmem - read from persistent memory with an ordinary memcpy
 *
 * Reads need no special handling; __force drops the __pmem sparse
 * annotation for the plain memcpy.
 */
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
	memcpy(dst, (void __force const *) src, size);
}
/*
 * memunmap_pmem - release a mapping established by memremap_pmem()
 * @dev: device that owns the devres-managed mapping
 * @addr: pmem address returned by memremap_pmem()
 */
static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
{
	devm_memunmap(dev, (void __force *) addr);
}
/* True when the architecture supplies asm/pmem.h implementations */
static inline bool arch_has_pmem_api(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
  68. /**
  69. * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
  70. *
  71. * For a given cpu implementation within an architecture it is possible
  72. * that wmb_pmem() resolves to a nop. In the case this returns
  73. * false, pmem api users are unable to ensure durability and may want to
  74. * fall back to a different data consistency model, or otherwise notify
  75. * the user.
  76. */
  77. static inline bool arch_has_wmb_pmem(void)
  78. {
  79. return arch_has_pmem_api() && __arch_has_wmb_pmem();
  80. }
/*
 * These defaults seek to offer decent performance and minimize the
 * window between i/o completion and writes being durable on media.
 * However, it is undefined / architecture specific whether
 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
 * making data durable relative to i/o completion.
 */

/* Default write path: plain memcpy through the write-through mapping */
static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t size)
{
	memcpy((void __force *) dst, src, size);
}
/* Default iterator write path: non-temporal copy to minimize cache residue */
static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
		size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_nocache((void __force *)addr, bytes, i);
}
  98. static inline void default_clear_pmem(void __pmem *addr, size_t size)
  99. {
  100. if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
  101. clear_page((void __force *)addr);
  102. else
  103. memset((void __force *)addr, 0, size);
  104. }
/**
 * memremap_pmem - map physical persistent memory for pmem api
 * @dev: device managing the lifetime of the mapping
 * @offset: physical address of persistent memory
 * @size: size of the mapping
 *
 * Establish a mapping of the architecture specific memory type expected
 * by memcpy_to_pmem() and wmb_pmem().  For example, it may be
 * the case that an uncacheable or writethrough mapping is sufficient,
 * or a writeback mapping provided memcpy_to_pmem() and
 * wmb_pmem() arrange for the data to be written through the
 * cache to persistent media.
 */
static inline void __pmem *memremap_pmem(struct device *dev,
		resource_size_t offset, unsigned long size)
{
	/* ARCH_MEMREMAP_PMEM is MEMREMAP_WB or MEMREMAP_WT, see top of file */
	return (void __pmem *) devm_memremap(dev, offset, size,
			ARCH_MEMREMAP_PMEM);
}
  123. /**
  124. * memcpy_to_pmem - copy data to persistent memory
  125. * @dst: destination buffer for the copy
  126. * @src: source buffer for the copy
  127. * @n: length of the copy in bytes
  128. *
  129. * Perform a memory copy that results in the destination of the copy
  130. * being effectively evicted from, or never written to, the processor
  131. * cache hierarchy after the copy completes. After memcpy_to_pmem()
  132. * data may still reside in cpu or platform buffers, so this operation
  133. * must be followed by a wmb_pmem().
  134. */
  135. static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
  136. {
  137. if (arch_has_pmem_api())
  138. arch_memcpy_to_pmem(dst, src, n);
  139. else
  140. default_memcpy_to_pmem(dst, src, n);
  141. }
  142. /**
  143. * wmb_pmem - synchronize writes to persistent memory
  144. *
  145. * After a series of memcpy_to_pmem() operations this drains data from
  146. * cpu write buffers and any platform (memory controller) buffers to
  147. * ensure that written data is durable on persistent memory media.
  148. */
  149. static inline void wmb_pmem(void)
  150. {
  151. if (arch_has_wmb_pmem())
  152. arch_wmb_pmem();
  153. else
  154. wmb();
  155. }
  156. /**
  157. * copy_from_iter_pmem - copy data from an iterator to PMEM
  158. * @addr: PMEM destination address
  159. * @bytes: number of bytes to copy
  160. * @i: iterator with source data
  161. *
  162. * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
  163. * This function requires explicit ordering with a wmb_pmem() call.
  164. */
  165. static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  166. struct iov_iter *i)
  167. {
  168. if (arch_has_pmem_api())
  169. return arch_copy_from_iter_pmem(addr, bytes, i);
  170. return default_copy_from_iter_pmem(addr, bytes, i);
  171. }
  172. /**
  173. * clear_pmem - zero a PMEM memory range
  174. * @addr: virtual start address
  175. * @size: number of bytes to zero
  176. *
  177. * Write zeros into the memory range starting at 'addr' for 'size' bytes.
  178. * This function requires explicit ordering with a wmb_pmem() call.
  179. */
  180. static inline void clear_pmem(void __pmem *addr, size_t size)
  181. {
  182. if (arch_has_pmem_api())
  183. arch_clear_pmem(addr, size);
  184. else
  185. default_clear_pmem(addr, size);
  186. }
  187. #endif /* __PMEM_H__ */