  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __NVKM_MMU_H__
  3. #define __NVKM_MMU_H__
  4. #include <core/subdev.h>
  5. struct nvkm_vma {
  6. struct list_head head;
  7. struct rb_node tree;
  8. u64 addr;
  9. u64 size:50;
  10. bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
  11. bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
  12. #define NVKM_VMA_PAGE_NONE 7
  13. u8 page:3; /* Requested page type (index, or NONE for automatic). */
  14. u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
  15. bool used:1; /* Region allocated. */
  16. bool part:1; /* Region was split from an allocated region by map(). */
  17. bool user:1; /* Region user-allocated. */
  18. bool busy:1; /* Region busy (for temporarily preventing user access). */
  19. struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
  20. struct nvkm_tags *tags; /* Compression tag reference. */
  21. };
  22. struct nvkm_vmm {
  23. const struct nvkm_vmm_func *func;
  24. struct nvkm_mmu *mmu;
  25. const char *name;
  26. u32 debug;
  27. struct kref kref;
  28. struct mutex mutex;
  29. u64 start;
  30. u64 limit;
  31. struct nvkm_vmm_pt *pd;
  32. struct list_head join;
  33. struct list_head list;
  34. struct rb_root free;
  35. struct rb_root root;
  36. bool bootstrapped;
  37. atomic_t engref[NVKM_SUBDEV_NR];
  38. dma_addr_t null;
  39. void *nullp;
  40. };
  41. int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
  42. struct lock_class_key *, const char *name, struct nvkm_vmm **);
  43. struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
  44. void nvkm_vmm_unref(struct nvkm_vmm **);
  45. int nvkm_vmm_boot(struct nvkm_vmm *);
  46. int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
  47. void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
  48. int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
  49. void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
  50. struct nvkm_vmm_map {
  51. struct nvkm_memory *memory;
  52. u64 offset;
  53. struct nvkm_mm_node *mem;
  54. struct scatterlist *sgl;
  55. dma_addr_t *dma;
  56. u64 off;
  57. const struct nvkm_vmm_page *page;
  58. struct nvkm_tags *tags;
  59. u64 next;
  60. u64 type;
  61. u64 ctag;
  62. };
  63. int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
  64. struct nvkm_vmm_map *);
  65. void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
  66. struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
  67. struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
  68. struct nvkm_mmu {
  69. const struct nvkm_mmu_func *func;
  70. struct nvkm_subdev subdev;
  71. u8 dma_bits;
  72. int heap_nr;
  73. struct {
  74. #define NVKM_MEM_VRAM 0x01
  75. #define NVKM_MEM_HOST 0x02
  76. #define NVKM_MEM_COMP 0x04
  77. #define NVKM_MEM_DISP 0x08
  78. u8 type;
  79. u64 size;
  80. } heap[4];
  81. int type_nr;
  82. struct {
  83. #define NVKM_MEM_KIND 0x10
  84. #define NVKM_MEM_MAPPABLE 0x20
  85. #define NVKM_MEM_COHERENT 0x40
  86. #define NVKM_MEM_UNCACHED 0x80
  87. u8 type;
  88. u8 heap;
  89. } type[16];
  90. struct nvkm_vmm *vmm;
  91. struct {
  92. struct mutex mutex;
  93. struct list_head list;
  94. } ptc, ptp;
  95. struct nvkm_device_oclass user;
  96. };
  97. int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  98. int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  99. int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  100. int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  101. int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  102. int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  103. int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  104. int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  105. int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  106. int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  107. int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  108. int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  109. int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
  110. #endif