@@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page;
+	struct pagevec *pvec = &vm->free_pages;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
 
-	if (vm->free_pages.nr)
-		return vm->free_pages.pages[--vm->free_pages.nr];
+	if (likely(pvec->nr))
+		return pvec->pages[--pvec->nr];
+
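+	/* Pages not kept in WC memory need no conversion; allocate directly. */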
+	if (!vm->pt_kmap_wc)
+		return alloc_page(gfp);
+
+	/* A placeholder for a specific mutex to guard the WC stash */
+	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+
+	/* Look in our global stash of WC pages... */
+	pvec = &vm->i915->mm.wc_stash;
+	if (likely(pvec->nr))
+		return pvec->pages[--pvec->nr];
 
-	page = alloc_page(gfp);
-	if (!page)
+	/* Otherwise batch allocate pages to amortize the cost of set_pages_wc(). */
+	do {
+		struct page *page;
+
+		page = alloc_page(gfp);
+		if (unlikely(!page))
+			break;
+
+		pvec->pages[pvec->nr++] = page;
+	} while (pagevec_space(pvec));
+
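+	/* Could not allocate even a single page: fail the allocation. */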
+	if (unlikely(!pvec->nr))
 		return NULL;
 
-	if (vm->pt_kmap_wc)
-		set_pages_array_wc(&page, 1);
+	set_pages_array_wc(pvec->pages, pvec->nr);
 
-	return page;
+	return pvec->pages[--pvec->nr];
 }
 
-static void vm_free_pages_release(struct i915_address_space *vm)
+static void vm_free_pages_release(struct i915_address_space *vm,
+				  bool immediate)
 {
-	GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+	struct pagevec *pvec = &vm->free_pages;
+
+	GEM_BUG_ON(!pagevec_count(pvec));
+
+	if (vm->pt_kmap_wc) {
+		struct pagevec *stash = &vm->i915->mm.wc_stash;
 
-	if (vm->pt_kmap_wc)
-		set_pages_array_wb(vm->free_pages.pages,
-				   pagevec_count(&vm->free_pages));
+		/* When using WC, first fill up the global stash; only the
+		 * overflow that does not fit is released immediately.
+		 */
+
+		lockdep_assert_held(&vm->i915->drm.struct_mutex);
+		if (pagevec_space(stash)) {
+			do {
+				stash->pages[stash->nr++] =
+					pvec->pages[--pvec->nr];
+				if (!pvec->nr)
+					return;
+			} while (pagevec_space(stash));
+
+			/* As we have made some room in the VM's free_pages,
+			 * we can wait for it to fill again. Unless we are
+			 * inside i915_address_space_fini() and must
+			 * immediately release the pages!
+			 */
+			if (!immediate)
+				return;
+		}
+
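+		/* Flip any pages that did not fit the stash back to WB. */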
+		set_pages_array_wb(pvec->pages, pvec->nr);
+	}
 
-	__pagevec_release(&vm->free_pages);
+	__pagevec_release(pvec);
 }
 
 static void vm_free_page(struct i915_address_space *vm, struct page *page)
 {
 	if (!pagevec_add(&vm->free_pages, page))
-		vm_free_pages_release(vm);
+		vm_free_pages_release(vm, false);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
@@ -452,12 +499,31 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
+	struct page *page;
+	dma_addr_t addr;
+
+	page = alloc_page(gfp | __GFP_ZERO);
+	if (unlikely(!page))
+		return -ENOMEM;
+
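+	/* Map the zeroed page for device access via DMA. */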
+	addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+			    PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(vm->dma, addr))) {
+		__free_page(page);
+		return -ENOMEM;
+	}
+
+	vm->scratch_page.page = page;
+	vm->scratch_page.daddr = addr;
+	return 0;
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
 {
-	cleanup_page_dma(vm, &vm->scratch_page);
+	struct i915_page_dma *p = &vm->scratch_page;
+
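+	/* Undo setup_scratch_page(): unmap from the device, then free. */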
+	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -1337,18 +1403,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			1ULL << 48 :
 			1ULL << 32;
 
-	ret = gen8_init_scratch(&ppgtt->base);
-	if (ret) {
-		ppgtt->base.total = 0;
-		return ret;
-	}
-
 	/* There are only few exceptions for gen >=6. chv and bxt.
 	 * And we are not sure about the latter so play safe for now.
 	 */
 	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
 		ppgtt->base.pt_kmap_wc = true;
 
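+	/* Set up scratch only after pt_kmap_wc is decided, so the scratch
+	 * page tables are allocated with the right caching mode.
+	 */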
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret) {
+		ppgtt->base.total = 0;
+		return ret;
+	}
+
 	if (use_4lvl(vm)) {
 		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
 		if (ret)
@@ -1872,7 +1938,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 static void i915_address_space_fini(struct i915_address_space *vm)
 {
 	if (pagevec_count(&vm->free_pages))
-		vm_free_pages_release(vm);
+		vm_free_pages_release(vm, true);
 
 	i915_gem_timeline_fini(&vm->timeline);
 	drm_mm_takedown(&vm->mm);
@@ -2598,6 +2664,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma, *vn;
+	struct pagevec *pvec;
 
 	ggtt->base.closed = true;
 
@@ -2621,6 +2688,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	}
 
 	ggtt->base.cleanup(&ggtt->base);
+
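+	/* Drain the global WC stash: restore WB caching before freeing. */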
+	pvec = &dev_priv->mm.wc_stash;
+	if (pvec->nr) {
+		set_pages_array_wb(pvec->pages, pvec->nr);
+		__pagevec_release(pvec);
+	}
+
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	arch_phys_wc_del(ggtt->mtrr);