/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
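
/*
 * Worked example (illustrative; assumes a 64-bit build where
 * BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5):
 * SWP_TYPE_SHIFT is then 58, so swp_entry(2, 0x1234) packs to
 * (2UL << 58) | 0x1234: type in the high bits, offset dense in the
 * low bits, exactly as the comment above requires.
 */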
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
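
/*
 * Round-trip sketch (hypothetical values): `type' and `offset'
 * occupy disjoint bit ranges of ->val, so the pair always survives
 * encode/decode:
 *
 *	swp_entry_t e = swp_entry(1, 0x2a);
 *
 * yields swp_type(e) == 1 and swp_offset(e) == 0x2a.
 */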
#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif
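
/*
 * Typical caller pattern (sketch): a pte that is neither none nor
 * present must encode a swap entry, so the check usually precedes a
 * conversion:
 *
 *	if (is_swap_pte(pte))
 *		entry = pte_to_swp_entry(pte);
 */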
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
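
/*
 * Note (sketch, assuming CONFIG_HAVE_ARCH_SOFT_DIRTY): soft-dirty is
 * a software pte bit, not part of the arch swap encoding, which is
 * why pte_to_swp_entry() strips it before decoding.  A caller that
 * needs it preserved re-applies the bit after conversion, roughly:
 *
 *	pte = swp_entry_to_pte(entry);
 *	if (pte_swp_soft_dirty(oldpte))
 *		pte = pte_swp_mksoft_dirty(pte);
 */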
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
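
/*
 * Round-trip sketch: swap entries are stored in the page cache's
 * XArray as value entries, so
 *
 *	void *p = swp_to_radix_entry(e);
 *	swp_entry_t back = radix_to_swp_entry(p);
 *
 * gives back.val == e.val.  xa_mk_value() requires the value to fit
 * in BITS_PER_XA_VALUE bits, which is why SWP_TYPE_SHIFT is defined
 * relative to BITS_PER_XA_VALUE rather than BITS_PER_LONG.
 */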
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);

	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
				      unsigned long addr,
				      swp_entry_t entry,
				      unsigned int flags,
				      pmd_t *pmdp);
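
/*
 * Fault-path sketch (roughly what the generic swap fault code does):
 * a CPU touching a device-private pte decodes it and hands off to
 * the driver-backed handler declared above:
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_device_private_entry(entry))
 *		ret = device_private_entry_fault(vma, addr, entry,
 *						 flags, pmdp);
 *
 * The offset field carries the ZONE_DEVICE pfn, so the _to_page()
 * helper is simply pfn_to_page(swp_offset(entry)).
 */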
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
						    unsigned long addr,
						    swp_entry_t entry,
						    unsigned int flags,
						    pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			 page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				   spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
				      struct mm_struct *mm, pte_t *pte);
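
/*
 * Lifecycle sketch: while a page is mid-migration its ptes are
 * replaced with migration entries, so a faulting thread waits for
 * the move to finish instead of touching stale data:
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(mm, pmd, address);
 *
 * The PageLocked() assertions above enforce that such entries only
 * ever name a locked page.
 */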
#else
#define make_migration_entry(page, write) swp_entry(0, 0)

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					  spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     struct mm_struct *mm, pte_t *pte) { }

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}
#endif
struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
				    struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
				 struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
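
/*
 * Sketch: a THP under migration leaves a non-present pmd behind, so
 * a page-table walker typically does
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 *
 * mirroring the pte-level migration_entry_wait() above.
 */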
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
					   struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
					struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}
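
/*
 * Sketch: when memory_failure() unmaps a poisoned page it plants
 * hwpoison entries in place of the ptes, so a later fault can bail
 * out instead of touching the bad frame:
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_hwpoison_entry(entry))
 *		return VM_FAULT_HWPOISON;
 */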
#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
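
/*
 * Note: the migration, device-private and hwpoison types are all
 * carved out of the type space at or above MAX_SWAPFILES, so the
 * single comparison above separates every special entry from one
 * backed by a real swapfile.
 */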

#endif /* _LINUX_SWAPOPS_H */