@@ -1135,13 +1135,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 
 	while (count) {
-		int j, order = __fls(count);
+		int j, order;
+
+		for (order = __fls(count); order > 0; --order) {
+			/*
+			 * We do not want OOM killer to be invoked as long
+			 * as we can fall back to single pages, so we force
+			 * __GFP_NORETRY for orders higher than zero.
+			 */
+			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+			if (pages[i])
+				break;
+		}
 
-		pages[i] = alloc_pages(gfp, order);
-		while (!pages[i] && order)
-			pages[i] = alloc_pages(gfp, --order);
-		if (!pages[i])
-			goto error;
+		if (!pages[i]) {
+			/*
+			 * Fall back to single page allocation.
+			 * Might invoke OOM killer as last resort.
+			 */
+			pages[i] = alloc_pages(gfp, 0);
+			if (!pages[i])
+				goto error;
+		}
 
 		if (order) {
 			split_page(pages[i], order);
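
For readers skimming the hunk, the allocation strategy it introduces can be read outside the diff. The sketch below is illustrative only and is not part of the patch: the helper name alloc_buffer_chunk() and its signature are made up for this note, and the pages[] bookkeeping, the i/count accounting and the error unwinding of __iommu_alloc_buffer() are omitted. It only restates the pattern the new code uses: try progressively smaller high-order allocations with __GFP_NORETRY so they cannot trigger the OOM killer, then fall back to a plain order-0 allocation that may.

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only: allocate one chunk for a
 * buffer that still needs 'count' pages, preferring the largest order
 * that fits but never letting a high-order attempt invoke the OOM
 * killer. The order actually obtained is returned via *allocated_order.
 */
static struct page *alloc_buffer_chunk(size_t count, gfp_t gfp,
				       unsigned int *allocated_order)
{
	struct page *page = NULL;
	unsigned int order;

	/* Opportunistic pass: high orders, give up quickly on pressure. */
	for (order = __fls(count); order > 0; --order) {
		page = alloc_pages(gfp | __GFP_NORETRY, order);
		if (page)
			break;
	}

	if (!page) {
		/* Order-0 fallback: this attempt may invoke the OOM killer. */
		order = 0;
		page = alloc_pages(gfp, 0);
		if (!page)
			return NULL;
	}

	if (order)
		split_page(page, order);	/* hand the chunk out as order-0 pages */

	*allocated_order = order;
	return page;
}

The design point, spelled out in the in-line comments of the hunk, is that only the final order-0 attempt is allowed to apply the usual reclaim and OOM pressure; every higher-order attempt is opportunistic and gives up quickly.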