// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>

#include <linux/of_device.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
#include "xen_drm_front_shbuf.h"

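/*
 * Book-keeping entry which ties a display buffer cookie (and, once a
 * framebuffer is attached, its framebuffer cookie) to the shared buffer
 * backing it. Entries live on front_info->dbuf_list.
 */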
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_drm_front_shbuf *shbuf;
};

static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
		struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *dbuf;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf->dbuf_cookie = dbuf_cookie;
	dbuf->shbuf = shbuf;
	list_add(&dbuf->list, &front_info->dbuf_list);
	return 0;
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
		u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->fb_cookie == fb_cookie)
			xen_drm_front_shbuf_flush(buf->shbuf);
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_drm_front_shbuf_unmap(buf->shbuf);
			xen_drm_front_shbuf_free(buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_drm_front_shbuf_unmap(buf->shbuf);
		xen_drm_front_shbuf_free(buf->shbuf);
		kfree(buf);
	}
}

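/*
 * Request ring helpers: be_prepare_req() claims the next free slot on the
 * shared request ring and assigns it a fresh request id; callers hold
 * front_info->io_lock while touching the ring. be_stream_do_io() pushes the
 * request to the backend and be_stream_wait_io() waits for its response.
 */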
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
		struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

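/*
 * The request helpers below all follow the same pattern: serialize
 * requesters with req_io_lock, fill a request on the ring under io_lock,
 * kick the backend and then wait (with a timeout) for its response.
 */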
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
		u32 x, u32 y, u32 width, u32 height,
		u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
		u64 dbuf_cookie, u32 width, u32 height,
		u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_shbuf *shbuf;
	struct xendispl_req *req;
	struct xen_drm_front_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.pages = pages;
	buf_cfg.size = size;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
	if (IS_ERR(shbuf))
		return PTR_ERR(shbuf);

	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
	if (ret < 0) {
		xen_drm_front_shbuf_free(shbuf);
		return ret;
	}

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_drm_front_shbuf_get_dir_start(shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_drm_front_shbuf_map(shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

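/*
 * Ask the backend to destroy a display buffer and drop the local
 * book-keeping entry: before the request for backend allocated buffers,
 * after it (regardless of the outcome) for frontend allocated ones.
 */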
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
		u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For the backend allocated buffer release references now, so backend
	 * can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
		u64 dbuf_cookie, u64 fb_cookie, u32 width,
		u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
		u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
		int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

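/*
 * Notification from the backend that a page flip for @fb_cookie completed:
 * forward it to the KMS pipeline of the corresponding connector.
 */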
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
		int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
			fb_cookie);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
		struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done so,
	 * because of the possible races: once you create a handle it becomes
	 * immediately visible to user-space, so the latter can try accessing
	 * object without pages etc.
	 * For details also see drm_gem_handle_create
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
			xen_drm_front_dbuf_to_cookie(obj),
			args->width, args->height, args->bpp,
			args->size,
			xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
			xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

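/*
 * GEM object tear-down: if the DRM device is still registered, tell the
 * backend to destroy the display buffer; if it is already unplugged, only
 * the local book-keeping entry can be released.
 */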
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
				xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
				xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

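/*
 * Final DRM device release, called when the last reference is dropped.
 * With backend allocated buffers the XenBus state switch is deferred to
 * this point so the backend keeps the buffers alive until we are done.
 */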
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET |
			DRIVER_PRIME | DRIVER_ATOMIC,
	.release = xen_drm_drv_release,
	.gem_vm_ops = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
	.dumb_create = xen_drm_drv_dumb_create,
	.fops = &xen_drm_dev_fops,
	.name = "xendrm-du",
	.desc = "Xen PV DRM Display Unit",
	.date = "20180221",
	.major = 1,
	.minor = 0,
};

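/* Allocate and register the DRM device once the backend connection is up. */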
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
			xen_drm_driver.name, xen_drm_driver.major,
			xen_drm_driver.minor, xen_drm_driver.patchlevel,
			xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

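/*
 * Unplug the DRM device and free protocol resources when the backend goes
 * away; the DRM core keeps the device itself alive until userspace closes
 * all open handles and drm_driver.release runs.
 */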
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				XenbusStateInitialising);
}

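/*
 * Backend is in XenbusStateInitWait: read the card configuration from
 * XenStore and create/publish the event channels for all connectors.
 */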
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);

	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

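/*
 * XenBus .otherend_changed handler: drive the frontend side of the
 * frontend/backend state machine.
 */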
static void displback_changed(struct xenbus_device *xb_dev,
		enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
			xenbus_strstate(backend_state),
			xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * in this state backend starts freeing resources,
		 * so let it go into closed state, so we can also
		 * remove ours
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

static int xen_drv_probe(struct xenbus_device *xb_dev,
		const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	/*
	 * The device is not spawn from a device tree, so arch_setup_dma_ops
	 * is not called, thus leaving the device with dummy DMA ops.
	 * This makes the device return error on PRIME buffer import, which
	 * is not correct: to fix this call of_dma_configure() with a NULL
	 * node to set default DMA ops.
	 */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	ret = of_dma_configure(dev, NULL, true);
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
			sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally when front driver removed backend will finally go into
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read backend's state manually and wait with time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
			XenbusStateUnknown) != XenbusStateInitWait) &&
			--to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
				"state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
				xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
				XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);