@@ -926,7 +926,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	unsigned i;
 	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -958,27 +957,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 #endif
 
-	r = ttm_pool_populate(ttm);
-	if (r) {
-		return r;
-	}
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
-						       0, PAGE_SIZE,
-						       PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
-			while (i--) {
-				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				gtt->ttm.dma_address[i] = 0;
-			}
-			ttm_pool_unpopulate(ttm);
-			return -EFAULT;
-		}
-	}
-
-	r = 0;
+	r = ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
 trace_mappings:
 	if (likely(!r))
 		amdgpu_trace_dma_map(ttm);
@@ -989,7 +968,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
 	if (gtt && gtt->userptr) {
@@ -1012,14 +990,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	}
 #endif
 
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (gtt->ttm.dma_address[i]) {
-			pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-	}
-
-	ttm_pool_unpopulate(ttm);
+	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
 }
 
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
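
For reference, a minimal sketch of what the consolidated TTM helper roughly covers (an illustrative approximation, not the actual ttm_page_alloc.c implementation, which among other things may also coalesce contiguous pages): ttm_populate_and_map_pages() populates the tt from the TTM pool and DMA-maps each page with error unwind, the same pattern the removed open-coded loop implemented via the older pci_* DMA wrappers; ttm_unmap_and_unpopulate_pages() is the inverse. The helper name "sketch_populate_and_map" below is hypothetical.

/* Illustrative sketch only (assumes the TTM headers of this era); not the
 * real ttm_populate_and_map_pages().  It mirrors the populate + per-page
 * DMA map + unwind pattern of the removed open-coded loop, but through
 * the generic DMA API instead of the pci_* wrappers.
 */
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_bo_driver.h>	/* struct ttm_dma_tt */
#include <drm/ttm/ttm_page_alloc.h>	/* ttm_pool_populate() */

static int sketch_populate_and_map(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i;
	int r;

	/* Allocate the backing pages from the TTM page pool. */
	r = ttm_pool_populate(&tt->ttm);
	if (r)
		return r;

	/* DMA-map every page, recording the bus address per page. */
	for (i = 0; i < tt->ttm.num_pages; i++) {
		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			/* Unwind the pages already mapped, then release them. */
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}
	}
	return 0;
}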