@@ -929,7 +929,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
-	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	struct dma_page *d_page;
@@ -1031,6 +1031,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
+	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
 	struct dma_pool *pool;
 	struct dma_page *d_page, *next;
 	enum pool_type type;
@@ -1051,8 +1052,8 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 			count++;
 			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
-				ttm_mem_global_free_page(ttm->glob->mem_glob,
-							 d_page->p, pool->size);
+				ttm_mem_global_free_page(mem_glob, d_page->p,
+							 pool->size);
 				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
 			}
 			ttm_dma_page_put(pool, d_page);
@@ -1080,8 +1081,8 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		count++;
 
 		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p, pool->size);
+			ttm_mem_global_free_page(mem_glob, d_page->p,
+						 pool->size);
 			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
 		}