@@ -1122,6 +1122,9 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
+/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
+static const int iommu_order_array[] = { 9, 8, 4, 0 };
+
 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 					  gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1129,6 +1132,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	int count = size >> PAGE_SHIFT;
 	int array_size = count * sizeof(struct page *);
 	int i = 0;
+	int order_idx = 0;
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
@@ -1162,22 +1166,24 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	while (count) {
 		int j, order;
 
-		for (order = __fls(count); order > 0; --order) {
-			/*
-			 * We do not want OOM killer to be invoked as long
-			 * as we can fall back to single pages, so we force
-			 * __GFP_NORETRY for orders higher than zero.
-			 */
-			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
-			if (pages[i])
-				break;
+		order = iommu_order_array[order_idx];
+
+		/* Drop down when we get small */
+		if (__fls(count) < order) {
+			order_idx++;
+			continue;
 		}
 
-		if (!pages[i]) {
-			/*
-			 * Fall back to single page allocation.
-			 * Might invoke OOM killer as last resort.
-			 */
+		if (order) {
+			/* See if it's easy to allocate a high-order chunk */
+			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+
+			/* Go down a notch at first sign of pressure */
+			if (!pages[i]) {
+				order_idx++;
+				continue;
+			}
+		} else {
 			pages[i] = alloc_pages(gfp, 0);
 			if (!pages[i])
 				goto error;
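
Taken together, the hunks swap the old per-iteration search (start at __fls(count) and step down one order at a time on every pass through the loop) for a fixed ladder of orders that only ever moves downward: 2M, 1M, 64K, then 4K chunks. What follows is a minimal user-space sketch of that control flow, not kernel code; try_alloc(), fls_last() and max_easy_order are made-up stand-ins for alloc_pages(gfp | __GFP_NORETRY, order), __fls() and memory pressure, and the order ladder is simply copied from the patch. It shows the two ways the loop "drops down a notch": when the remaining count is smaller than the current chunk, and when a high-order allocation fails.

#include <stdbool.h>
#include <stdio.h>

/* Same ladder as the patch: 2M, 1M, 64K, 4K (in 4K pages); must end with 0. */
static const int order_array[] = { 9, 8, 4, 0 };

/*
 * Stand-in for alloc_pages(gfp | __GFP_NORETRY, order): pretend that any
 * order above max_easy_order cannot be satisfied, as under fragmentation.
 */
static bool try_alloc(int order, int max_easy_order)
{
	return order <= max_easy_order;
}

/* Highest set bit of a non-zero word, like the kernel's __fls(). */
static int fls_last(unsigned int x)
{
	int bit = 0;

	while (x >>= 1)
		bit++;
	return bit;
}

int main(void)
{
	unsigned int count = 1000;	/* pages still to allocate (~4MB) */
	int order_idx = 0;
	int max_easy_order = 8;		/* pretend order-9 chunks are unavailable */

	while (count) {
		int order = order_array[order_idx];

		/* Drop down when the remainder is smaller than the chunk. */
		if (fls_last(count) < order) {
			order_idx++;
			continue;
		}

		/* Drop down at the first sign of pressure; never climb back. */
		if (order && !try_alloc(order, max_easy_order)) {
			order_idx++;
			continue;
		}

		printf("allocated order-%d chunk (%u pages)\n", order, 1u << order);
		count -= 1u << order;
	}
	return 0;
}

The property the patch relies on is that order_idx never decreases: once an order has failed, or has become larger than what is left to allocate, it is never retried, so a fragmented system pays for at most one failed attempt per rung of the ladder. Only the order-0 path allocates without __GFP_NORETRY, so only single pages may fall back to heavier reclaim, as in the original code.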