a6xx_gpu.h (1.6 KB)
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
  3. #ifndef __A6XX_GPU_H__
  4. #define __A6XX_GPU_H__
  5. #include "adreno_gpu.h"
  6. #include "a6xx.xml.h"
  7. #include "a6xx_gmu.h"
  8. extern bool hang_debug;
  9. struct a6xx_gpu {
  10. struct adreno_gpu base;
  11. struct drm_gem_object *sqe_bo;
  12. uint64_t sqe_iova;
  13. struct msm_ringbuffer *cur_ring;
  14. struct a6xx_gmu gmu;
  15. };
  16. #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
  17. /*
  18. * Given a register and a count, return a value to program into
  19. * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
  20. * registers starting at _reg.
  21. */
  22. #define A6XX_PROTECT_RW(_reg, _len) \
  23. ((1 << 31) | \
  24. (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
  25. /*
  26. * Same as above, but allow reads over the range. For areas of mixed use (such
  27. * as performance counters) this allows us to protect a much larger range with a
  28. * single register
  29. */
  30. #define A6XX_PROTECT_RDONLY(_reg, _len) \
  31. ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
  32. int a6xx_gmu_resume(struct a6xx_gpu *gpu);
  33. int a6xx_gmu_stop(struct a6xx_gpu *gpu);
  34. int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
  35. int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
  36. bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
  37. int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
  38. void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
  39. int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
  40. void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
  41. void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
  42. unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
  43. #endif /* __A6XX_GPU_H__ */