#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use these directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_REPEAT 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
#define ___GFP_COMP 0x4000u
#define ___GFP_ZERO 0x8000u
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditionals on these. If necessary, modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
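
/*
 * Illustrative note (not part of the original header): masking a gfp value
 * with GFP_ZONEMASK isolates the zone-selector bits that gfp_zone() decodes
 * further below, e.g. GFP_HIGHUSER_MOVABLE & GFP_ZONEMASK yields
 * __GFP_HIGHMEM | __GFP_MOVABLE (0x0a).
 */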

/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only
 * relevant to kmem allocations).
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
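
/*
 * Illustrative sketch (not part of the original header): a cache whose
 * objects can be freed via a shrinker would be created with
 * SLAB_RECLAIM_ACCOUNT, so its backing pages are allocated
 * __GFP_RECLAIMABLE and grouped accordingly:
 *
 *	cache = kmem_cache_create("example_cache", size, 0,
 *				  SLAB_RECLAIM_ACCOUNT, NULL);
 *
 * "example_cache" and size are placeholders, not names used elsewhere.
 */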

/*
 * Watermark modifiers -- control access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. process exiting or swapping. Users should either
 * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
 */
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
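
/*
 * Illustrative sketch (not part of the original header): mempool_alloc() is
 * a well-known user of these bits; before trying the page allocator it
 * forbids reserve access so the pool itself remains the fallback, roughly:
 *
 *	gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 *
 * The exact flag set is mempool-internal behaviour and may differ by
 * kernel version.
 */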

/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. New users should be evaluated carefully
 * (and the flag should be used only when there is no reasonable failure
 * policy), but it is definitely preferable to use the flag rather than
 * open-code an endless loop around the allocator.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 * return NULL when direct reclaim and memory compaction have failed to allow
 * the allocation to succeed. The OOM killer is not called with the current
 * implementation.
 */
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
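
/*
 * Illustrative sketch (not part of the original header): a filesystem path
 * that already holds fs locks would clear __GFP_FS so reclaim cannot
 * recurse into the filesystem:
 *
 *	page = alloc_page(GFP_KERNEL & ~__GFP_FS);
 *
 * which, given the definitions in this file, is exactly GFP_NOFS
 * (alloc_page() is defined later in this header).
 */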

/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 * in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP requests compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 *
 * __GFP_OTHER_NODE is for allocations that are on a remote node but that
 * should not be accounted for as a remote allocation in vmstat. A
 * typical user would be khugepaged collapsing a huge page on a remote
 * node.
 */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
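
/*
 * Illustrative sketch (not part of the original header): __GFP_ZERO
 * composes with any of the combinations below; kzalloc() is simply
 * kmalloc() with __GFP_ZERO OR-ed in, so
 *
 *	buf = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 *
 * behaves like kzalloc(size, GFP_KERNEL). buf and size are placeholders.
 */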

/* Room for 26 __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 * The flag indicates that the caller requires that the lowest zone be
 * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 * address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE is used for THP allocations. They are compound allocations
 * that will fail quickly if memory is not available and will not wake
 * kswapd on failure.
 */
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
		       __GFP_RECLAIMABLE)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
		       ~__GFP_RECLAIM)
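
/*
 * Illustrative sketch (not part of the original header): typical starting
 * points by calling context:
 *
 *	buf = kmalloc(size, GFP_KERNEL);		process context, may sleep
 *	buf = kmalloc(size, GFP_ATOMIC);		interrupt/atomic context
 *	buf = kmalloc(size, GFP_KERNEL_ACCOUNT);	charged to kmemcg
 *
 * buf and size are placeholders; the flag choice, not the allocator,
 * is the point.
 */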

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
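
/*
 * Worked examples (assuming the migrate types from linux/mmzone.h):
 * GFP_KERNEL sets neither mobility bit, so it maps to MIGRATE_UNMOVABLE;
 * GFP_HIGHUSER_MOVABLE carries __GFP_MOVABLE (bit 3) and maps to
 * MIGRATE_MOVABLE; GFP_KERNEL | __GFP_RECLAIMABLE maps to
 * MIGRATE_RECLAIMABLE. Setting both bits is invalid, hence the VM_WARN_ON.
 */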

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
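
/*
 * Illustrative sketch (not part of the original header): callers handed a
 * gfp mask can branch on gfpflags_allow_blocking() to decide whether a
 * sleeping primitive is safe:
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		mutex_lock(&example_lock);
 *	else if (!mutex_trylock(&example_lock))
 *		return NULL;
 *
 * example_lock is a placeholder, not a lock defined elsewhere.
 */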

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *	bit	result
 *	=================
 *	0x0	=> NORMAL
 *	0x1	=> DMA or NORMAL
 *	0x2	=> HIGHMEM or NORMAL
 *	0x3	=> BAD (DMA+HIGHMEM)
 *	0x4	=> DMA32 or DMA or NORMAL
 *	0x5	=> BAD (DMA+DMA32)
 *	0x6	=> BAD (HIGHMEM+DMA32)
 *	0x7	=> BAD (HIGHMEM+DMA32+DMA)
 *	0x8	=> NORMAL (MOVABLE+0)
 *	0x9	=> DMA or NORMAL (MOVABLE+DMA)
 *	0xa	=> MOVABLE (Movable is valid only if HIGHMEM is set too)
 *	0xb	=> BAD (MOVABLE+HIGHMEM+DMA)
 *	0xc	=> DMA32 (MOVABLE+DMA32)
 *	0xd	=> BAD (MOVABLE+DMA32+DMA)
 *	0xe	=> BAD (MOVABLE+DMA32+HIGHMEM)
 *	0xf	=> BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)
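
/*
 * Worked example: for a mask whose zone bits are __GFP_HIGHMEM (0x2), the
 * lookup in gfp_zone() below shifts the table right by 2 * GFP_ZONES_SHIFT
 * and masks off GFP_ZONES_SHIFT bits, yielding OPT_ZONE_HIGHMEM, i.e.
 * ZONE_HIGHMEM on CONFIG_HIGHMEM kernels and ZONE_NORMAL otherwise.
 */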

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
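
/*
 * Worked examples: gfp_zone(GFP_KERNEL) has no zone bits set and returns
 * ZONE_NORMAL; gfp_zone(GFP_HIGHUSER_MOVABLE) sees bits 0xa
 * (__GFP_HIGHMEM | __GFP_MOVABLE) and returns ZONE_MOVABLE;
 * gfp_zone(GFP_DMA) returns ZONE_DMA on CONFIG_ZONE_DMA kernels and
 * ZONE_NORMAL otherwise.
 */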

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages; the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
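
/*
 * Illustrative sketch (not part of the original header): an allocation path
 * picks its candidate zones by combining the caller's node and mask:
 *
 *	struct zonelist *zl = node_zonelist(numa_node_id(), GFP_KERNEL);
 *
 * selects the fallback zonelist; a mask containing __GFP_THISNODE on a
 * NUMA kernel selects the no-fallback zonelist instead.
 */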

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid
 * and online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}
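
/*
 * Illustrative sketch (not part of the original header): a driver
 * allocating a buffer page near its device's NUMA node might do:
 *
 *	page = alloc_pages_node(dev_to_node(dev), GFP_KERNEL, 0);
 *	if (!page)
 *		return -ENOMEM;
 *
 * dev is a placeholder struct device pointer; dev_to_node() returns
 * NUMA_NO_NODE for devices with no node affinity, which falls back to the
 * current CPU's node as described above.
 */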

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
					  unsigned int order);

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
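
/*
 * Illustrative sketch (not part of the original header): alloc_pages_exact()
 * suits physically contiguous buffers whose size is not a power-of-two
 * number of pages; allocations pair with free_pages_exact():
 *
 *	buf = alloc_pages_exact(len, GFP_KERNEL | __GFP_ZERO);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, len);
 *
 * buf and len are placeholders.
 */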

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void *__alloc_page_frag(struct page_frag_cache *nc,
			       unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
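
/*
 * Illustrative sketch (not part of the original header): the
 * address-returning variants pair naturally, e.g. a single zeroed page:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */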

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */