a5xx_gpu.h 5.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170
  1. /* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #ifndef __A5XX_GPU_H__
  14. #define __A5XX_GPU_H__
  15. #include "adreno_gpu.h"
  16. /* Bringing over the hack from the previous targets */
  17. #undef ROP_COPY
  18. #undef ROP_XOR
  19. #include "a5xx.xml.h"
/*
 * Per-device state for an Adreno A5XX GPU, wrapping the generic
 * adreno_gpu base. Each *_bo/_iova pair is a GEM buffer object and
 * its GPU-visible address.
 */
struct a5xx_gpu {
	struct adreno_gpu base;

	/* PM4 microcode buffer and GPU address — presumably CP firmware;
	 * loaded elsewhere, not visible in this header */
	struct drm_gem_object *pm4_bo;
	uint64_t pm4_iova;

	/* PFP microcode buffer and GPU address */
	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	/* GPMU (power management unit) firmware buffer, GPU address and
	 * size in dwords (inferred from name — confirm against loader) */
	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	/* Leakage value used by the limits management (LM) code */
	uint32_t lm_leakage;

	/* Ringbuffer currently executing and, during a preemption,
	 * the ringbuffer being switched to */
	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	/* One preemption record (buffer, CPU mapping, GPU address)
	 * per ringbuffer — see struct a5xx_preempt_record below */
	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

	/* Current enum preempt_state, read/written atomically for the
	 * lockless preemption state machine documented below */
	atomic_t preempt_state;
	/* Fires if a triggered preemption never completes (PREEMPT_FAULTED) */
	struct timer_list preempt_timer;
};

/* Upcast from the embedded adreno_gpu/msm_gpu base to the a5xx wrapper */
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
#ifdef CONFIG_DEBUG_FS
/* Register the a5xx debugfs entries under the given DRM minor */
int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - no preemption in progress. Next state: START.
 * PREEMPT_START - the trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE.
 * PREEMPT_ABORT - an intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - a preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING.
 * PREEMPT_FAULTED - a preemption timed out (never completed). This will
 * trigger recovery. Next state: N/A.
 * PREEMPT_PENDING - preemption complete interrupt fired; the callback is
 * checking the success of the operation. Next states: FAULTED, NONE.
 */
enum preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_ABORT,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};
/*
 * struct a5xx_preempt_record is a shared buffer between the microcode and the
 * CPU to store the state for preemption. The record itself is much larger
 * (64k) but most of that is used by the CP for storage.
 *
 * There is a preemption record assigned per ringbuffer. When the CPU triggers
 * a preemption, it fills out the record with the useful information (wptr,
 * ring base, etc) and the microcode uses that information to set up the CP
 * following the preemption. When a ring is switched out, the CP will save the
 * ringbuffer state back to the record. In this way, once the records are
 * properly set up the CPU can quickly switch back and forth between
 * ringbuffers by only updating a few registers (often only the wptr).
 *
 * These are the CPU aware registers in the record:
 * @magic: Must always be A5XX_PREEMPT_RECORD_MAGIC (0x27C4BAFC)
 * @info: Type of the record - written 0 by the CPU, updated by the CP
 * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used
 * by the CP
 * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
 * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
 * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
 * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
 * @rbase: Value of RB_BASE written by CPU, save/restored by CP
 * @counter: GPU address of the storage area for the performance counters
 *
 * NOTE: this layout is consumed by the CP microcode — field order and sizes
 * must not change.
 */
struct a5xx_preempt_record {
	uint32_t magic;
	uint32_t info;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;
	uint32_t wptr;
	uint64_t rptr_addr;
	uint64_t rbase;
	uint64_t counter;
};
/* Magic identifier for the preemption record (struct a5xx_preempt_record) */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL

/*
 * Even though the structure above is only a few bytes, we need a full 64k to
 * store the entire preemption record from the CP
 */
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)

/*
 * The preemption counter block is a storage area for the value of the
 * preemption counters that are saved immediately before context switch. We
 * append it on to the end of the allocation for the preemption record.
 */
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)

/* Set up power/limits management for the GPU; returns 0 or a negative errno */
int a5xx_power_init(struct msm_gpu *gpu);
/* Load the GPMU microcode into gpmu_bo (see struct a5xx_gpu) */
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
  117. static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
  118. uint32_t reg, uint32_t mask, uint32_t value)
  119. {
  120. while (usecs--) {
  121. udelay(1);
  122. if ((gpu_read(gpu, reg) & mask) == value)
  123. return 0;
  124. cpu_relax();
  125. }
  126. return -ETIMEDOUT;
  127. }
/* Wait for the GPU to go idle on the given ring; true on success */
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
/* Enable (true) or disable (false) hardware clock gating */
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);

/* Lifecycle of the lockless preemption machinery (see state machine above) */
void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
/* Attempt to start a preemption to the highest-priority non-empty ring */
void a5xx_preempt_trigger(struct msm_gpu *gpu);
/* Preemption-complete interrupt handler */
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);
  135. /* Return true if we are in a preempt state */
  136. static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
  137. {
  138. int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
  139. return !(preempt_state == PREEMPT_NONE ||
  140. preempt_state == PREEMPT_ABORT);
  141. }
  142. #endif /* __A5XX_GPU_H__ */