pmem.h

/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent pmem driver flush operation will drain posted write queues.
 */
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
        int rem;

        /*
         * We are copying between two kernel buffers; if
         * __copy_from_user_inatomic_nocache() returns an error (page
         * fault) we would have already reported a general protection
         * fault before the WARN+BUG.
         */
        rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
        if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
                        __func__, dst, src, rem))
                BUG();
}
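
/*
 * Usage sketch (illustrative only, not part of this header): the
 * non-temporal stores above are weakly ordered, so a caller that needs
 * the copy to be durable would typically follow it with a store
 * barrier before reporting the write complete, e.g.
 *
 *      arch_memcpy_to_pmem(pmem_dst, buf, len);
 *      wmb();
 *
 * where pmem_dst, buf and len are hypothetical caller variables, and
 * wmb() expands to sfence on x86, draining the write-combining buffers
 * used by the non-temporal stores.
 */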

/**
 * arch_wb_cache_pmem - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = addr + size;
        void *p;

        for (p = (void *)((unsigned long)addr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}
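
/*
 * Worked example of the rounding (assuming a 64-byte cache line):
 * a call with addr == 0x1001 and size == 0x80 rounds the start down
 * to 0x1000 and issues clwb for the lines at 0x1000, 0x1040 and
 * 0x1080, so the partial lines at both ends are written back in full.
 */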

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 */
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
                struct iov_iter *i)
{
        size_t len;

        /* TODO: skip the write-back by always using non-temporal stores */
        len = copy_from_iter_nocache(addr, bytes, i);

        /*
         * In the iovec case on x86_64 copy_from_iter_nocache() uses
         * non-temporal stores for the bulk of the transfer, but we need
         * to manually flush if the transfer is unaligned. A cached
         * memory copy is used when destination or size is not naturally
         * aligned. That is:
         *   - Require 8-byte alignment when size is 8 bytes or larger.
         *   - Require 4-byte alignment when size is 4 bytes.
         *
         * In the non-iovec case the entire destination needs to be
         * flushed.
         */
        if (iter_is_iovec(i)) {
                unsigned long flushed, dest = (unsigned long) addr;

                if (bytes < 8) {
                        if (!IS_ALIGNED(dest, 4) || (bytes != 4))
                                arch_wb_cache_pmem(addr, bytes);
                } else {
                        if (!IS_ALIGNED(dest, 8)) {
                                dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
                                arch_wb_cache_pmem(addr, 1);
                        }

                        flushed = dest - (unsigned long) addr;
                        if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
                                arch_wb_cache_pmem(addr + bytes - 1, 1);
                }
        } else
                arch_wb_cache_pmem(addr, bytes);

        return len;
}
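
/*
 * Worked example of the flush logic above (assuming a 64-byte cache
 * line): for an iovec write with addr == 0x1003 and bytes == 16, the
 * destination is not 8-byte aligned, so the copy used cached stores
 * for the unaligned head. dest is rounded up to the next line boundary
 * (0x1040) and the one-byte write-back of addr flushes the line at
 * 0x1000, covering the whole 16-byte transfer; bytes (16) is not
 * greater than flushed (61), so no separate tail flush is issued.
 */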

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 */
static inline void arch_clear_pmem(void *addr, size_t size)
{
        memset(addr, 0, size);
        arch_wb_cache_pmem(addr, size);
}
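
/**
 * arch_invalidate_pmem - flush and invalidate a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to invalidate
 *
 * CLFLUSH both writes back and invalidates the affected cache lines,
 * so a subsequent read of the range is served from the pmem media
 * rather than from stale cached data.
 */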
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
        clflush_cache_range(addr, size);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */