/* mmu.h */

#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
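
/*
 * A VMA describes a single region of a VMM's virtual address space; regions
 * are kept on the owning VMM's address-ordered list and in its rb-trees.
 */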
struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr;
	u64 size:50;
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8 page:3; /* Requested page type (index, or NONE for automatic). */
	u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool user:1; /* Region user-allocated. */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
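
/*
 * A VMM is a GPU virtual address space: its page directory, the start/limit
 * of the range it manages, and the lists/trees tracking free and allocated
 * regions.
 */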
struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu;
	const char *name;
	u32 debug;
	struct kref kref;
	struct mutex mutex;

	u64 start;
	u64 limit;

	struct nvkm_vmm_pt *pd;
	struct list_head join;

	struct list_head list;
	struct rb_root free;
	struct rb_root root;

	bool bootstrapped;
	atomic_t engref[NVKM_SUBDEV_NR];

	dma_addr_t null;
	void *nullp;
};
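
/*
 * Address-space lifetime and region allocation: nvkm_vmm_new() creates an
 * address space, nvkm_vmm_ref()/nvkm_vmm_unref() manage its lifetime,
 * nvkm_vmm_join()/nvkm_vmm_part() attach/detach it to/from an instance
 * block, and nvkm_vmm_get()/nvkm_vmm_put() allocate/free regions within it.
 */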
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
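
/*
 * Arguments/state for a map operation: the memory being mapped (memory and
 * offset, with its pages described by one of mem/sgl/dma), the page size
 * selected for the mapping, and per-mapping PTE state (tags, next, type,
 * ctag) used by the backend while writing page-table entries.
 */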
struct nvkm_vmm_map {
	struct nvkm_memory *memory;
	u64 offset;

	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 off;

	const struct nvkm_vmm_page *page;

	struct nvkm_tags *tags;
	u64 next;
	u64 type;
	u64 ctag;
};
int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
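
/*
 * Illustrative flow only (not part of the original header; "vmm", "memory",
 * "size", "page" and "argv"/"argc" are assumed to come from the caller):
 *
 *	struct nvkm_vmm_map map = {
 *		.memory = memory,
 *		.offset = 0,
 *		.mem = node,	(or .sgl / .dma, whichever describes the pages)
 *	};
 *	struct nvkm_vma *vma;
 *	int ret = nvkm_vmm_get(vmm, page, size, &vma);
 *	if (ret == 0) {
 *		ret = nvkm_vmm_map(vmm, vma, argv, argc, &map);
 *		...
 *		nvkm_vmm_unmap(vmm, vma);
 *		nvkm_vmm_put(vmm, &vma);
 *	}
 */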

struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
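
/*
 * The MMU subdev: the memory heaps and memory types advertised to clients,
 * page-table allocation caches (ptc/ptp), and, on chipsets with a single
 * global address space, the shared VMM itself.
 */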
struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8 dma_bits;

	int heap_nr;
	struct {
#define NVKM_MEM_VRAM 0x01
#define NVKM_MEM_HOST 0x02
#define NVKM_MEM_COMP 0x04
#define NVKM_MEM_DISP 0x08
		u8 type;
		u64 size;
	} heap[4];
	int type_nr;
	struct {
#define NVKM_MEM_KIND 0x10
#define NVKM_MEM_MAPPABLE 0x20
#define NVKM_MEM_COHERENT 0x40
#define NVKM_MEM_UNCACHED 0x80
		u8 type;
		u8 heap;
	} type[16];

	struct nvkm_vmm *vmm;

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct nvkm_device_oclass user;
};
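
/* Per-chipset MMU constructors. */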
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif