
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};
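
/*
 * Illustrative sketch (the exact strings are assumptions, not copied from
 * vc4_bo.c) of the bo_type_names[] table the comment above asks to keep in
 * sync: one human-readable label per enum value for BO accounting.
 */
static const char * const bo_type_names_sketch[] = {
	[VC4_BO_TYPE_KERNEL] = "kernel",
	[VC4_BO_TYPE_V3D] = "V3D",
	[VC4_BO_TYPE_V3D_SHADER] = "V3D shader",
	[VC4_BO_TYPE_DUMB] = "dumb",
	[VC4_BO_TYPE_BIN] = "binner",
	[VC4_BO_TYPE_RCL] = "RCL",
	[VC4_BO_TYPE_BCL] = "BCL",
	[VC4_BO_TYPE_KERNEL_CACHE] = "kernel BO cache",
};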

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[0];
};
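
/*
 * A minimal sketch, assuming the usual refcount_t idiom, of how the refcnt
 * field above drives the perfmon lifetime; the real helpers are the
 * vc4_perfmon_get()/vc4_perfmon_put() declarations at the end of this file.
 */
static inline void vc4_perfmon_get_sketch(struct vc4_perfmon *perfmon)
{
	if (perfmon)
		refcount_inc(&perfmon->refcnt);
}

static inline void vc4_perfmon_put_sketch(struct vc4_perfmon *perfmon)
{
	/* Last reference dropped: free the object (hypothetical cleanup). */
	if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
		kfree(perfmon);
}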

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;
	struct vc4_txp *txp;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;

	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
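
/*
 * Minimal sketch (an assumption, not driver code): given the two counters
 * in struct vc4_dev above, a job with sequence number @seqno has completed
 * exactly when finished_seqno has caught up to it. Callers typically hold
 * job_lock or tolerate a stale answer.
 */
static inline bool vc4_seqno_passed_sketch(struct vc4_dev *vc4, uint64_t seqno)
{
	return vc4->finished_seqno >= seqno;
}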

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* Normally (resv == &_resv), except for imported BOs. */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here. */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
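
/*
 * Illustrative sketch (an assumption about vc4_bo.c, not a copy of it) of
 * the O(1) size-bucket lookup the bo_cache comment in struct vc4_dev
 * describes: cached BOs are bucketed by page count. Assumes @size is a
 * nonzero multiple of PAGE_SIZE, and must be called with bo_lock held.
 */
static inline struct vc4_bo *
vc4_bo_cache_lookup_sketch(struct vc4_dev *vc4, size_t size)
{
	size_t page_index = (size / PAGE_SIZE) - 1;

	if (page_index >= vc4->bo_cache.size_list_size)
		return NULL;

	return list_first_entry_or_null(&vc4->bo_cache.size_list[page_index],
					struct vc4_bo, size_head);
}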

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
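
/*
 * Sketch of how the seqno field above could back a dma_fence_ops
 * .signaled() hook (an assumption; the real ops are vc4_fence_ops in
 * vc4_fence.c).
 */
static inline bool vc4_fence_signaled_sketch(struct dma_fence *fence)
{
	struct vc4_fence *f = to_vc4_fence(fence);
	struct vc4_dev *vc4 = to_vc4_dev(f->dev);

	return vc4->finished_seqno >= f->seqno;
}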

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}
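
/*
 * Illustrative sketch of what the dlist/hw_dlist fields above are for (an
 * assumption in the spirit of vc4_plane_write_dlist(), declared later in
 * this file, not a copy of it): copy the precomputed dlist words into the
 * HVS display list and remember where they landed for later updates.
 */
static inline u32 __iomem *
vc4_plane_copy_dlist_sketch(struct vc4_plane_state *vc4_state,
			    u32 __iomem *dlist)
{
	int i;

	vc4_state->hw_dlist = dlist;

	for (i = 0; i < vc4_state->dlist_count; i++)
		writel(vc4_state->dlist[i], &dlist[i]);

	return dlist + vc4_state->dlist_count;
}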

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;

	enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
	struct drm_crtc base;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	/* Which HVS channel we're using for our CRTC. */
	int channel;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];
	/* Size in pixels of the COB memory allocated to this CRTC. */
	u32 cob_size;

	struct drm_pending_vblank_event *event;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr (p) gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};
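
/*
 * Minimal sketch (an assumption, not vc4_perfmon.c itself) of the IDR
 * lookup behind vc4_perfmon_find(), declared at the end of this file: find
 * the id under the per-file lock and take a reference before returning.
 */
static inline struct vc4_perfmon *
vc4_perfmon_find_sketch(struct vc4_file *vc4file, int id)
{
	struct vc4_perfmon *perfmon;

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_find(&vc4file->perfmon.idr, id);
	if (perfmon)
		refcount_inc(&perfmon->refcnt);
	mutex_unlock(&vc4file->perfmon.lock);

	return perfmon;
}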

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
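
/*
 * Illustrative caller (an assumption, not driver code) showing the locking
 * rule documented in struct vc4_dev: the job-list helpers above must run
 * under job_lock, since the IRQ handler also edits those lists.
 */
static inline bool vc4_any_job_pending_sketch(struct vc4_dev *vc4)
{
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&vc4->job_lock, flags);
	pending = vc4_first_bin_job(vc4) || vc4_first_render_job(vc4);
	spin_unlock_irqrestore(&vc4->job_lock, flags);

	return pending;
}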

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};
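
/*
 * Minimal sketch (an assumption, not validation code) of testing the ~0
 * "unused parameter" convention described above.
 */
static inline bool
vc4_tex_param_provided_sketch(const struct vc4_texture_sample_info *sample,
			      unsigned int param)
{
	return sample->p_offset[param] != ~0U;
}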

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
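
/*
 * Example use of wait_for() (the register offset and bit below are
 * hypothetical, for illustration only): poll until a busy bit clears,
 * sleeping 1ms between reads and giving up after 100ms.
 */
#define VC4_EXAMPLE_STATUS	0x00000004	/* hypothetical offset */
#define VC4_EXAMPLE_BUSY	BIT(0)		/* hypothetical busy bit */

static inline int vc4_example_wait_idle(struct vc4_dev *vc4)
{
	return wait_for(!(V3D_READ(VC4_EXAMPLE_STATUS) & VC4_EXAMPLE_BUSY),
			100);
}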

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;
int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);