nouveau_sgdma.c

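/*
 * nouveau_sgdma.c - scatter/gather DMA backend glue between TTM and the
 * nouveau driver.  Two ttm_backend_func tables are provided below:
 * nv04_sgdma_backend for pre-NV50 cards, which (un)maps pages in the GPU
 * address space at bind/unbind time, and nv50_sgdma_backend for NV50 and
 * later, where the actual VM (un)mapping is deferred to move_notify().
 */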
#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
        /* this has to be the first field so populate/unpopulate in
         * nouveau_bo.c works properly, otherwise we'd have to move them here
         */
        struct ttm_dma_tt ttm;
        struct drm_device *dev;
        struct nouveau_mem *node;
};
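
/*
 * Tear down the DMA-aware ttm_tt and free the wrapper.  The cast is safe
 * because ttm is the first field of struct nouveau_sgdma_be (see above).
 */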
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        if (ttm) {
                ttm_dma_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
}
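
/*
 * Pre-NV50 bind: record the backing storage in the memory node (a
 * scatterlist for imported buffers, otherwise the DMA addresses TTM
 * populated) and map it into the GPU address space right away.
 * node->size is converted from CPU pages to the GPU's 4 KiB page units.
 */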
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;

        if (ttm->sg) {
                node->sg = ttm->sg;
                node->pages = NULL;
        } else {
                node->sg = NULL;
                node->pages = nvbe->ttm.dma_address;
        }
        node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

        nouveau_vm_map(&node->vma[0], node);
        nvbe->node = node;
        return 0;
}
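
/* Pre-NV50 unbind: drop the VM mapping created by nv04_sgdma_bind(). */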
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        nouveau_vm_unmap(&nvbe->node->vma[0]);
        return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
        .bind = nv04_sgdma_bind,
        .unbind = nv04_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};
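
/*
 * NV50+ bind: the real VM mapping happens in move_notify(), so this only
 * stashes the page addresses (or sg table) and size in the memory node
 * for that path to consume.
 */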
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        if (ttm->sg) {
                node->sg = ttm->sg;
                node->pages = NULL;
        } else {
                node->sg = NULL;
                node->pages = nvbe->ttm.dma_address;
        }
        node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
        return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
        /* noop: unbound in move_notify() */
        return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
        .bind = nv50_sgdma_bind,
        .unbind = nv50_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};
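
/*
 * Allocate the per-object SGDMA backend and select the ops table that
 * matches the card generation.  If ttm_dma_tt_init() fails, no kfree()
 * is done here; in the TTM core of this vintage the init failure path
 * is expected to invoke the backend's ->destroy() hook
 * (nouveau_sgdma_destroy), which frees nvbe.  (Assumption based on the
 * TTM core of this era, not verified in this file.)
 */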
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
                         unsigned long size, uint32_t page_flags,
                         struct page *dummy_read_page)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = drm->dev;
        if (nv_device(drm->device)->card_type < NV_50)
                nvbe->ttm.ttm.func = &nv04_sgdma_backend;
        else
                nvbe->ttm.ttm.func = &nv50_sgdma_backend;

        if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
                return NULL;
        return &nvbe->ttm.ttm;
}
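
/*
 * Usage sketch (assumed wiring, not shown in this file): this function is
 * typically installed as the ttm_tt_create hook in nouveau's ttm_bo_driver
 * (see nouveau_bo.c).  TTM then drives the lifecycle itself: ->bind() when
 * a buffer moves into a GART placement, ->unbind() on eviction, and
 * ->destroy() when the ttm_tt is torn down.
 *
 *        static struct ttm_bo_driver nouveau_bo_driver = {
 *                .ttm_tt_create = &nouveau_sgdma_create_ttm,
 *                ...
 *        };
 */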