#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
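/*
 * Worked example (illustrative only; assumes the default MAX_ORDER of 11
 * and 4 KiB pages, both of which are configuration/arch dependent):
 *
 *	MAX_ORDER_NR_PAGES = 1 << (11 - 1) = 1024 pages
 *	largest buddy block = 1024 * 4 KiB  = 4 MiB
 *
 * i.e. the buddy allocator hands out blocks of 2^order pages for
 * order 0 .. MAX_ORDER-1, the biggest being MAX_ORDER_NR_PAGES pages.
 */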
/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works. Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by
	 * __free_pageblock_cma(). What is important though is that a
	 * range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines. There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ALLOC_BATCH,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,	/* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/* " " " " " */
	NR_INACTIVE_FILE,	/* " " " " " */
	NR_ACTIVE_FILE,		/* " " " " " */
	NR_UNEVICTABLE,		/* " " " " " */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (including tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };
/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
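/*
 * Example of the index arithmetic described above (illustrative only):
 *
 *	LRU_INACTIVE_ANON = 0
 *	LRU_ACTIVE_ANON   = 0 + LRU_ACTIVE            = 1
 *	LRU_INACTIVE_FILE = 0 + LRU_FILE              = 2
 *	LRU_ACTIVE_FILE   = 0 + LRU_FILE + LRU_ACTIVE = 3
 *	LRU_UNEVICTABLE   = 4
 *
 * so reclaim code can, for example, derive the active list paired with an
 * inactive one by adding LRU_ACTIVE, and the file list paired with an anon
 * one by adding LRU_FILE.
 */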
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_MEMCG
	struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
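/*
 * Illustrative sketch of how the three watermarks are typically consulted
 * (simplified; the real checks live in mm/page_alloc.c and mm/vmscan.c and
 * also take lowmem_reserve, alloc flags and the allocation order into
 * account):
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wake kswapd to reclaim in the background;
 *	if (zone_page_state(zone, NR_FREE_PAGES) < min_wmark_pages(zone))
 *		allocations stall and fall back to direct reclaim;
 *
 * kswapd keeps reclaiming until free pages rise back above
 * high_wmark_pages(zone).
 */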
struct per_cpu_pages {
	int count;	/* number of pages in the list */
	int high;	/* high watermark, emptying needed */
	int batch;	/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};
#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there
	 * being tons of freeable ram on the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	unsigned long lowmem_reserve[MAX_NR_ZONES];

	/*
	 * This is a per-zone reserve of pages that should not be
	 * considered dirtyable memory.
	 */
	unsigned long dirty_balance_reserve;

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t lock;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool compact_blockskip_flush;

	/* pfns where compaction scanners should start */
	unsigned long compact_cached_free_pfn;
	unsigned long compact_cached_migrate_pfn;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t span_seqlock;
#endif
	struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
#endif
	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t lru_lock;
	struct lruvec lruvec;

	/* Evictions & activations on the inactive file list */
	atomic_long_t inactive_age;

	unsigned long pages_scanned;	/* since last reclaim */
	unsigned long flags;		/* zone flags, see below */

	/* Zone statistics */
	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU. Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table -- the array holding the hash table
	 * wait_table_hash_nr_entries -- the size of the hash table array
	 * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t *wait_table;
	unsigned long wait_table_hash_nr_entries;
	unsigned long wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data *zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path. But, it is written
	 * quite infrequently.
	 *
	 * The span_seqlock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock. It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't
	 * tolerate drift of present_pages should hold the memory hotplug lock
	 * to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages are
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long spanned_pages;
	unsigned long present_pages;
	unsigned long managed_pages;

	/*
	 * Number of MIGRATE_RESERVE page blocks. Maintained only as an
	 * optimization. Protected by zone->lock.
	 */
	int nr_migrate_reserve_block;

	/*
	 * rarely used fields:
	 */
	const char *name;
} ____cacheline_internodealigned_in_smp;
typedef enum {
	ZONE_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,	/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,		/* zone has many dirty pages backed by
				 * a congested BDI
				 */
	ZONE_TAIL_LRU_DIRTY,	/* reclaim scanning has recently found
				 * many dirty file pages at the tail
				 * of the LRU.
				 */
	ZONE_WRITEBACK,		/* reclaim scanning has recently found
				 * many pages under writeback
				 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_dirty(const struct zone *zone)
{
	return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
}

static inline int zone_is_reclaim_writeback(const struct zone *zone)
{
	return test_bit(ZONE_WRITEBACK, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}
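/*
 * Worked example for the helpers above (illustrative numbers only):
 * a zone with zone_start_pfn = 0x10000 and spanned_pages = 0x8000 has
 * zone_end_pfn() = 0x18000 (the first pfn past the zone), so
 * zone_spans_pfn() is true exactly for pfns in [0x10000, 0x18000).
 */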
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for __GFP_THISNODE.
 *
 * [0] : Zonelist with fallback
 * [1] : No fallback (__GFP_THISNODE)
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current tasks mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zones offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans. During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set. During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * reexamining zones for free memory when they just came up low on
 * memory a moment ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist. However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter). A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here. We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct. This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 * 1) The full, fixed length version, shown below, and
 * 2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it. This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;	/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif
/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above. If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;	// NULL or &zlcache
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;		// optional ...
#endif
};

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_MEMCG
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant. Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages;	/* total number of physical pages */
	unsigned long node_spanned_pages;	/* total size of physical page
						   range, including holes */
	int node_id;
	nodemask_t reclaim_nodes;	/* Nodes allowed to reclaim from */
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
	int kswapd_max_order;
	enum zone_type classzone_idx;
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
} pg_data_t;
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)	((zone) - (zone)->zone_pgdat->node_zones)
static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not. This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))	\

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
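/*
 * Typical use (sketch, loosely mirroring how mm/page_alloc.c walks the
 * fallback list for an allocation; the local variable names here are
 * illustrative only):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      zonelist_zone_idx(z), alloc_flags))
 *			break;		// found a usable zone
 *	}
 */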
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT	#bits space required to store a section #
 *
 * PA_SECTION_SHIFT	physical address to/from section number
 * PFN_SECTION_SHIFT	pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
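/*
 * Worked example (illustrative only; SECTION_SIZE_BITS is arch specific -
 * commonly 27 on x86_64 - and PAGE_SHIFT is assumed to be 12 here):
 *
 *	PFN_SECTION_SHIFT = 27 - 12 = 15
 *	PAGES_PER_SECTION = 1 << 15 = 32768 pages (128 MiB per section)
 *	pfn_to_section_nr(0x12345)  = 0x12345 >> 15 = 2
 *	SECTION_ALIGN_DOWN(0x12345) = 0x10000
 */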
struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages. However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_MEMCG
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * section. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information. There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif
static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ... They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)						\
({								\
	unsigned long __pfn_to_nid_pfn = (pfn);			\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));		\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs on either side of the
 * hole. In SPARSEMEM, it is assumed that a valid section has a memmap for
 * the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, may
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
				      struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */