@@ -395,13 +395,13 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
  * @cma:   Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @gfp_mask: GFP mask to use during compaction
+ * @no_warn: Avoid printing message about failed allocation
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
 struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-		       gfp_t gfp_mask)
+		       bool no_warn)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -447,7 +447,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-					 gfp_mask);
+					 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -466,7 +466,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
 	trace_cma_alloc(pfn, page, count, align);
 
-	if (ret && !(gfp_mask & __GFP_NOWARN)) {
+	if (ret && !no_warn) {
 		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
 			__func__, count, ret);
 		cma_debug_show_areas(cma);
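
Below is a minimal caller sketch, not part of the patch above, showing how code that previously forwarded a gfp_mask into cma_alloc() can map __GFP_NOWARN onto the new boolean no_warn argument; the wrapper name example_cma_alloc is hypothetical, and the allocation itself now always uses GFP_KERNEL inside cma_alloc().

#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>

/*
 * Hypothetical wrapper (illustration only): translate a caller-supplied
 * gfp mask into the new bool parameter. Only __GFP_NOWARN still has an
 * effect, namely suppressing the failure message printed by cma_alloc().
 */
static struct page *example_cma_alloc(struct cma *cma, size_t count,
				      unsigned int align, gfp_t gfp)
{
	return cma_alloc(cma, count, align, !!(gfp & __GFP_NOWARN));
}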