/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                          SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                          SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum swapcache offset is 27 bits on
 * 32-bit-pgoff_t architectures, i.e. at most 2^27 swapcache pages. That
 * assumes that the architecture packs the type/offset into the pte as
 * 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT 5
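
/*
 * Illustrative sketch (not part of this header): with the 5/27 split
 * described above, a 32-bit swap entry value could be packed and unpacked
 * like this. The actual layout is per-architecture; the portable accessors
 * are swp_entry()/swp_type()/swp_offset() in linux/swapops.h.
 *
 *      unsigned long val = ((unsigned long)type << 27) | offset;
 *      unsigned int type = val >> 27;                  // 0..31
 *      unsigned long offset = val & ((1UL << 27) - 1); // 0..2^27-1
 */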

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.txt. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
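
/*
 * Worked example (for illustration): with MAX_SWAPFILES_SHIFT = 5 and
 * CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all
 * enabled, MAX_SWAPFILES = 32 - 2 - 2 - 1 = 27: swap types 27..31 are
 * reserved for the special entries above, and only 27 real swap areas
 * can be active at once.
 */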

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char bootbits[1024]; /* Space for disklabel etc. */
                __u32 version;
                __u32 last_page;
                __u32 nr_badpages;
                unsigned char sws_uuid[16];
                unsigned char sws_volume[16];
                __u32 padding[117];
                __u32 badpages[1];
        } info;
};
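
/*
 * Illustrative sketch (not part of this header): swapon validation in
 * mm/swapfile.c reads the first page of the area and checks the last ten
 * bytes, roughly:
 *
 *      union swap_header *hdr = kmap(page);
 *      if (memcmp("SWAPSPACE2", hdr->magic.magic, 10))
 *              goto bad;       // old "SWAP-SPACE" v0 areas are rejected too
 *      // hdr->info.version, last_page and badpages[] can now be trusted
 *      kunmap(page);
 */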

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct list_head list;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};
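
/*
 * Illustrative sketch (not part of this header): since blocks are
 * PAGE_SIZE-sized, the mapping inside one extent is a simple linear shift:
 *
 *      if (offset >= se->start_page &&
 *          offset < se->start_page + se->nr_pages)
 *              sector = se->start_block + (offset - se->start_page);
 *
 * map_swap_page(), declared below, walks the extent list to do this for
 * real.
 */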

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
        ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
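
/*
 * Worked example (for illustration, assuming 4KB pages): info.badpages
 * starts at byte 1024 + 3*4 + 16 + 16 + 117*4 = 1536, and magic.magic at
 * PAGE_SIZE - 10 = 4086, so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637
 * bad-page slots fit in the header.
 */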

enum {
        SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
        SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
        SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
        SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
        SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
        SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
        SWP_BLKDEV = (1 << 6), /* it's a block device */
        SWP_FILE = (1 << 7), /* set after swap_activate success */
        SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 11), /* synchronous IO is efficient */
                                /* add others here before... */
        SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and the swap_info_struct->swap_map
                                 * elements corresponding to this swap
                                 * cluster
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
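
/*
 * Illustrative note (not part of this header): data:24 and flags:8 pack
 * into a single 32-bit word, so a free cluster can name any of 2^24
 * successor clusters, and an in-use cluster can count up to 2^24 - 1 used
 * pages -- far more than SWAPFILE_CLUSTER (256 in mm/swapfile.c) requires:
 *
 *      if (ci->flags & CLUSTER_FLAG_FREE)
 *              next = ci->data;        // free-list link
 *      else
 *              in_use = ci->data;      // pages used in this cluster
 */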

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        unsigned long flags;            /* SWP_USED etc: see above */
        signed short prio;              /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
        signed char type;               /* strange name for an index */
        unsigned int max;               /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct swap_extent *curr_swap_extent;
        struct swap_extent first_swap_extent;
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags requires
                                         * holding both this lock and swap_lock;
                                         * when both are needed, take swap_lock
                                         * first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
};
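
/*
 * Illustrative sketch (not part of this header): the free space of an
 * active area, in pages, is simply:
 *
 *      unsigned int free = si->pages - si->inuse_pages;
 *
 * and writability is gated on the flags, e.g.:
 *
 *      if ((si->flags & SWP_WRITEOK) && free)
 *              // this area can accept new swap allocations
 */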

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING 5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING 3
#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);

/* Do not use directly, use workingset_lookup_update */
void workingset_update_node(struct radix_tree_node *node);

/* Returns workingset_update_node() if the mapping has shadow entries. */
#define workingset_lookup_update(mapping)                               \
({                                                                      \
        radix_tree_update_node_t __helper = workingset_update_node;    \
        if (dax_mapping(mapping) || shmem_mapping(mapping))             \
                __helper = NULL;                                        \
        __helper;                                                       \
})
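
/*
 * Illustrative sketch (not part of this header): callers pass the result
 * straight into the radix tree API as the node-update callback; page-cache
 * deletion does roughly:
 *
 *      __radix_tree_replace(&mapping->page_tree, node, slot, NULL,
 *                           workingset_lookup_update(mapping));
 *
 * DAX and shmem mappings get NULL because they use radix tree entries for
 * their own purposes, which must not be treated as shadow entries.
 */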

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                              struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                       gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                            gfp_t gfp_mask, bool noswap,
                                            pg_data_t *pgdat,
                                            unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
                               unsigned int order)
{
        return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
                            bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                    unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                              sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])
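
/*
 * Worked example (for illustration, assuming 4KB pages): 2^14 pages * 4KB
 * = 64MB, so a 1GB swap device gets 16 address spaces, and an entry at
 * offset 0x5000 in type 1 lands in &swapper_spaces[1][0x5000 >> 14], i.e.
 * the second address space of that device.
 */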
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
                                      struct vm_area_struct *vma,
                                      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr);
extern struct page *swap_readahead_detect(struct vm_fault *vmf,
                                          struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                           struct vm_fault *vmf,
                                           struct vma_swap_readahead *swap_ra);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
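
/*
 * Worked example (for illustration): nr_swap_pages counts *free* swap
 * slots, so with total_swap_pages = 1000 and 400 slots still free,
 * 400 * 2 = 800 < 1000 and vm_swap_full() is true -- more than half of
 * swap is in use, so reclaim drops swapcache duplicates sooner.
 */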

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

#define swap_address_space(entry) (NULL)
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
#define vm_swap_full() 0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr))

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        return NULL;
}

static inline bool swap_use_vma_readahead(void)
{
        return false;
}

static inline struct page *swap_readahead_detect(
        struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
        return NULL;
}

static inline struct page *do_swap_page_readahead(
        swp_entry_t fentry, gfp_t gfp_mask,
        struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NULL;
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                    gfp_t gfp_mask)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
        (page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root ? */
        if (mem_cgroup_disabled() || !memcg->css.parent)
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */