/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_H_
#define __XEN_DRM_FRONT_H_

#include <drm/drmP.h>
#include <drm/drm_simple_kms_helper.h>

#include <linux/scatterlist.h>

#include "xen_drm_front_cfg.h"

/**
 * DOC: Driver modes of operation in terms of display buffers used
 *
 * Depending on the requirements for the para-virtualized environment, namely
 * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
 * host and guest environments, display buffers can be allocated by either
 * the frontend driver or the backend.
 */

/**
 * DOC: Buffers allocated by the frontend driver
 *
 * In this mode of operation the driver allocates buffers from system memory.
 *
 * Note! If used with accompanying DRM/(v)GPU drivers, this mode of operation
 * may require IOMMU support on the platform, so that the accompanying DRM/vGPU
 * hardware can still reach display buffer memory while importing PRIME
 * buffers from the frontend driver.
 */

/**
 * DOC: Buffers allocated by the backend
 *
 * This mode of operation is run-time configured via the guest domain
 * configuration through XenStore entries (see the illustrative example below).
 *
 * For systems which do not provide IOMMU support, but have specific
 * requirements for display buffers, it is possible to allocate such buffers
 * on the backend side and share those with the frontend.
 * For example, if the host domain is 1:1 mapped and has DRM/GPU hardware
 * expecting physically contiguous memory, this allows implementing zero-copy
 * use-cases.
 *
 * Note: while using this scenario, the following should be considered:
 *
 * #. If the guest domain dies, then pages/grants received from the backend
 *    cannot be claimed back.
 *
 * #. A misbehaving guest may send too many requests to the backend,
 *    exhausting its grant references and memory
 *    (consider this from a security POV).
 */
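
/*
 * Illustrative example: with the xl toolstack, backend allocation is
 * typically requested via the "be-alloc" option of a vdispl device in the
 * guest configuration, e.g.
 *
 *   vdispl = [ 'backend=0, be-alloc=1, connectors=id0:1920x1080' ]
 *
 * The exact option names and syntax depend on the Xen tools version; see
 * xl.cfg(5) and the displif protocol (xen/include/public/io/displif.h) for
 * the authoritative description.
 */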

/**
 * DOC: Driver limitations
 *
 * #. Only the primary plane without additional properties is supported.
 *
 * #. Only one video mode per connector is supported, which is configured
 *    via XenStore.
 *
 * #. All CRTCs operate at a fixed frequency of 60 Hz.
 */

/* timeout in ms to wait for the backend to respond */
#define XEN_DRM_FRONT_WAIT_BACK_MS	3000

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

struct xen_drm_front_info {
        struct xenbus_device *xb_dev;
        struct xen_drm_front_drm_info *drm_info;

        /* to protect data between backend IO code and interrupt handler */
        spinlock_t io_lock;

        int num_evt_pairs;
        struct xen_drm_front_evtchnl_pair *evt_pairs;
        struct xen_drm_front_cfg cfg;

        /* display buffers */
        struct list_head dbuf_list;
};

struct xen_drm_front_drm_pipeline {
        struct xen_drm_front_drm_info *drm_info;

        int index;

        struct drm_simple_display_pipe pipe;

        struct drm_connector conn;
        /* These are only for connector mode checking */
        int width, height;

        struct drm_pending_vblank_event *pending_event;

        struct delayed_work pflip_to_worker;

        bool conn_connected;
};

struct xen_drm_front_drm_info {
        struct xen_drm_front_info *front_info;
        struct drm_device *drm_dev;

        struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
};
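
/*
 * Display buffers and framebuffers are identified in frontend <-> backend
 * requests by 64-bit cookies; the helpers below derive those cookies from
 * the corresponding kernel object addresses, which are unique for the
 * lifetime of the objects.
 */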

static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
        return (uintptr_t)fb;
}

static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
        return (uintptr_t)gem_obj;
}
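
/*
 * Helpers implemented in xen_drm_front.c (brief summary, see the
 * implementation for details): the calls below taking a front_info or
 * pipeline argument post the corresponding request to the backend over the
 * shared ring and wait up to XEN_DRM_FRONT_WAIT_BACK_MS for a response;
 * xen_drm_front_on_frame_done() is called from the event channel handler
 * when the backend reports that a page flip has completed.
 */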

int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
                           u32 x, u32 y, u32 width, u32 height,
                           u32 bpp, u64 fb_cookie);

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
                              u64 dbuf_cookie, u32 width, u32 height,
                              u32 bpp, u64 size, struct page **pages);

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
                            u64 dbuf_cookie, u64 fb_cookie, u32 width,
                            u32 height, u32 pixel_format);

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
                            u64 fb_cookie);

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
                            int conn_idx, u64 fb_cookie);

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
                                 int conn_idx, u64 fb_cookie);
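
/*
 * Typical usage sketch (simplified): a dumb/GEM buffer is announced to the
 * backend with xen_drm_front_dbuf_create() using its dbuf cookie; creating
 * a DRM framebuffer on top of it triggers xen_drm_front_fb_attach() with
 * both cookies; display updates are requested with xen_drm_front_page_flip()
 * and completed when the backend reports the frame done via
 * xen_drm_front_on_frame_done().
 */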

#endif /* __XEN_DRM_FRONT_H_ */