/* hwsq.h — helpers for building nvkm hardware sequencer (HWSQ) scripts */
  1. #ifndef __NVKM_BUS_HWSQ_H__
  2. #define __NVKM_BUS_HWSQ_H__
  3. #include <subdev/bus.h>
/* State for one HWSQ script under construction. */
struct hwsq {
	struct nvkm_subdev *subdev;	/* owning subdev; NULL when no script is open */
	struct nvkm_hwsq *hwsq;		/* script being built (see nvkm_hwsq_init/fini) */
	int sequence;			/* bumped by hwsq_init(); invalidates cached reg data */
};
/*
 * Shadow copy of one hardware register, or of several mirrored copies of
 * the same register spaced @stride bytes apart.
 */
struct hwsq_reg {
	int sequence;	/* sequence of last access; stale vs hwsq->sequence forces re-read */
	bool force;	/* set by hwsq_nuke(): hwsq_mask() writes even if value unchanged */
	u32 addr;	/* base register address */
	u32 stride; /* in bytes */
	u32 mask;	/* bitmask of copies to write: bit n selects addr + n * stride */
	u32 data;	/* cached value; 0xdeadbeef sentinel until first read/write */
};
  17. static inline struct hwsq_reg
  18. hwsq_stride(u32 addr, u32 stride, u32 mask)
  19. {
  20. return (struct hwsq_reg) {
  21. .sequence = 0,
  22. .force = 0,
  23. .addr = addr,
  24. .stride = stride,
  25. .mask = mask,
  26. .data = 0xdeadbeef,
  27. };
  28. }
  29. static inline struct hwsq_reg
  30. hwsq_reg2(u32 addr1, u32 addr2)
  31. {
  32. return (struct hwsq_reg) {
  33. .sequence = 0,
  34. .force = 0,
  35. .addr = addr1,
  36. .stride = addr2 - addr1,
  37. .mask = 0x3,
  38. .data = 0xdeadbeef,
  39. };
  40. }
  41. static inline struct hwsq_reg
  42. hwsq_reg(u32 addr)
  43. {
  44. return (struct hwsq_reg) {
  45. .sequence = 0,
  46. .force = 0,
  47. .addr = addr,
  48. .stride = 0,
  49. .mask = 0x1,
  50. .data = 0xdeadbeef,
  51. };
  52. }
  53. static inline int
  54. hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
  55. {
  56. int ret;
  57. ret = nvkm_hwsq_init(subdev, &ram->hwsq);
  58. if (ret)
  59. return ret;
  60. ram->sequence++;
  61. ram->subdev = subdev;
  62. return 0;
  63. }
  64. static inline int
  65. hwsq_exec(struct hwsq *ram, bool exec)
  66. {
  67. int ret = 0;
  68. if (ram->subdev) {
  69. ret = nvkm_hwsq_fini(&ram->hwsq, exec);
  70. ram->subdev = NULL;
  71. }
  72. return ret;
  73. }
  74. static inline u32
  75. hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
  76. {
  77. struct nvkm_device *device = ram->subdev->device;
  78. if (reg->sequence != ram->sequence)
  79. reg->data = nvkm_rd32(device, reg->addr);
  80. return reg->data;
  81. }
  82. static inline void
  83. hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
  84. {
  85. u32 mask, off = 0;
  86. reg->sequence = ram->sequence;
  87. reg->data = data;
  88. for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
  89. if (mask & 1)
  90. nvkm_hwsq_wr32(ram->hwsq, reg->addr+off, reg->data);
  91. off += reg->stride;
  92. }
  93. }
/*
 * Force the next hwsq_mask() on @reg to emit a write even when the
 * shadow value already matches.  NOTE(review): nothing in this header
 * clears @force again — presumably the shadow is rebuilt per script;
 * confirm against callers.
 */
static inline void
hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
{
	reg->force = true;
}
  99. static inline u32
  100. hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
  101. {
  102. u32 temp = hwsq_rd32(ram, reg);
  103. if (temp != ((temp & ~mask) | data) || reg->force)
  104. hwsq_wr32(ram, reg, (temp & ~mask) | data);
  105. return temp;
  106. }
/* Queue a flag update in the script; semantics per nvkm_hwsq_setf(). */
static inline void
hwsq_setf(struct hwsq *ram, u8 flag, int data)
{
	nvkm_hwsq_setf(ram->hwsq, flag, data);
}
/* Queue a wait on @flag reaching @data; semantics per nvkm_hwsq_wait(). */
static inline void
hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
{
	nvkm_hwsq_wait(ram->hwsq, flag, data);
}
/* Queue a wait for vertical blanking; see nvkm_hwsq_wait_vblank(). */
static inline void
hwsq_wait_vblank(struct hwsq *ram)
{
	nvkm_hwsq_wait_vblank(ram->hwsq);
}
/* Queue a delay of @nsec nanoseconds; see nvkm_hwsq_nsec(). */
static inline void
hwsq_nsec(struct hwsq *ram, u32 nsec)
{
	nvkm_hwsq_nsec(ram->hwsq, nsec);
}
  127. #endif