qxl_fb.c

/*
 * Copyright © 2013 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
#include <linux/module.h>
#include <linux/fb.h>

#include "drmP.h"
#include "drm/drm.h"
#include "drm/drm_crtc.h"
#include "drm/drm_crtc_helper.h"
#include "qxl_drv.h"

#include "qxl_object.h"
#include "drm_fb_helper.h"

#define QXL_DIRTY_DELAY (HZ / 30)

struct qxl_fbdev {
        struct drm_fb_helper helper;
        struct qxl_framebuffer qfb;
        struct list_head fbdev_list;
        struct qxl_device *qdev;

        spinlock_t delayed_ops_lock;
        struct list_head delayed_ops;
        void *shadow;
        int size;

        /* dirty memory logging */
        struct {
                spinlock_t lock;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;
};

static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
                              struct qxl_device *qdev, struct fb_info *info,
                              const struct fb_image *image)
{
        qxl_fb_image->qdev = qdev;
        if (info) {
                qxl_fb_image->visual = info->fix.visual;
                if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
                    qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
                        memcpy(&qxl_fb_image->pseudo_palette,
                               info->pseudo_palette,
                               sizeof(qxl_fb_image->pseudo_palette));
        } else {
                /* fallback */
                if (image->depth == 1)
                        qxl_fb_image->visual = FB_VISUAL_MONO10;
                else
                        qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
        }
        if (image) {
                memcpy(&qxl_fb_image->fb_image, image,
                       sizeof(qxl_fb_image->fb_image));
        }
}
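
/*
 * Push the accumulated dirty rectangle to the device: take a snapshot of
 * the rectangle under dirty.lock, reset it, and emit an opaque draw of
 * that region from the shadow buffer to the primary surface.
 */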
static void qxl_fb_dirty_flush(struct fb_info *info)
{
        struct qxl_fbdev *qfbdev = info->par;
        struct qxl_device *qdev = qfbdev->qdev;
        struct qxl_fb_image qxl_fb_image;
        struct fb_image *image = &qxl_fb_image.fb_image;
        unsigned long flags;
        u32 x1, x2, y1, y2;

        /* TODO: hard coding 32 bpp */
        int stride = qfbdev->qfb.base.pitches[0];

        spin_lock_irqsave(&qfbdev->dirty.lock, flags);

        x1 = qfbdev->dirty.x1;
        x2 = qfbdev->dirty.x2;
        y1 = qfbdev->dirty.y1;
        y2 = qfbdev->dirty.y2;
        qfbdev->dirty.x1 = 0;
        qfbdev->dirty.x2 = 0;
        qfbdev->dirty.y1 = 0;
        qfbdev->dirty.y2 = 0;

        spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);

        /*
         * we are using a shadow draw buffer, at qdev->surface0_shadow
         */
        qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
        image->dx = x1;
        image->dy = y1;
        image->width = x2 - x1 + 1;
        image->height = y2 - y1 + 1;
        image->fg_color = 0xffffffff;  /* unused, just to avoid uninitialized
                                          warnings */
        image->bg_color = 0;
        image->depth = 32;             /* TODO: take from somewhere? */
        image->cmap.start = 0;
        image->cmap.len = 0;
        image->cmap.red = NULL;
        image->cmap.green = NULL;
        image->cmap.blue = NULL;
        image->cmap.transp = NULL;
        image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);

        qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
        qxl_draw_opaque_fb(&qxl_fb_image, stride);
}

static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
                             int x, int y, int width, int height)
{
        struct qxl_device *qdev = qfbdev->qdev;
        unsigned long flags;
        int x2, y2;

        x2 = x + width - 1;
        y2 = y + height - 1;

        spin_lock_irqsave(&qfbdev->dirty.lock, flags);

        if (qfbdev->dirty.y1 < y)
                y = qfbdev->dirty.y1;
        if (qfbdev->dirty.y2 > y2)
                y2 = qfbdev->dirty.y2;
        if (qfbdev->dirty.x1 < x)
                x = qfbdev->dirty.x1;
        if (qfbdev->dirty.x2 > x2)
                x2 = qfbdev->dirty.x2;

        qfbdev->dirty.x1 = x;
        qfbdev->dirty.x2 = x2;
        qfbdev->dirty.y1 = y;
        qfbdev->dirty.y2 = y2;

        spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);

        schedule_work(&qdev->fb_work);
}
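
/*
 * Deferred I/O callback: userspace writes to the mmap'ed shadow land here
 * as a list of touched pages, which is converted into a span of scanlines
 * and merged into the dirty rectangle.
 */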
static void qxl_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct qxl_fbdev *qfbdev = info->par;
        unsigned long start, end, min, max;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;
                qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
        }
}

static struct fb_deferred_io qxl_defio = {
        .delay          = QXL_DIRTY_DELAY,
        .deferred_io    = qxl_deferred_io,
};
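
/*
 * fbcon drawing hooks: render into the shadow buffer with the
 * drm_fb_helper_sys_* helpers, then record the touched rectangle so the
 * worker can flush it to the device.
 */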
static void qxl_fb_fillrect(struct fb_info *info,
                            const struct fb_fillrect *rect)
{
        struct qxl_fbdev *qfbdev = info->par;

        drm_fb_helper_sys_fillrect(info, rect);
        qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
                         rect->height);
}

static void qxl_fb_copyarea(struct fb_info *info,
                            const struct fb_copyarea *area)
{
        struct qxl_fbdev *qfbdev = info->par;

        drm_fb_helper_sys_copyarea(info, area);
        qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
                         area->height);
}

static void qxl_fb_imageblit(struct fb_info *info,
                             const struct fb_image *image)
{
        struct qxl_fbdev *qfbdev = info->par;

        drm_fb_helper_sys_imageblit(info, image);
        qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
                         image->height);
}
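
/*
 * Worker scheduled by qxl_dirty_update(); it flushes the current dirty
 * rectangle to the device from process context.
 */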
static void qxl_fb_work(struct work_struct *work)
{
        struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
        struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;

        qxl_fb_dirty_flush(qfbdev->helper.fbdev);
}

int qxl_fb_init(struct qxl_device *qdev)
{
        INIT_WORK(&qdev->fb_work, qxl_fb_work);
        return 0;
}

static struct fb_ops qxlfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,    /* TODO: copy vmwgfx */
        .fb_fillrect = qxl_fb_fillrect,
        .fb_copyarea = qxl_fb_copyarea,
        .fb_imageblit = qxl_fb_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
        .fb_debug_enter = drm_fb_helper_debug_enter,
        .fb_debug_leave = drm_fb_helper_debug_leave,
};

static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
        struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
        int ret;

        ret = qxl_bo_reserve(qbo, false);
        if (likely(ret == 0)) {
                qxl_bo_kunmap(qbo);
                qxl_bo_unpin(qbo);
                qxl_bo_unreserve(qbo);
        }
        drm_gem_object_unreference_unlocked(gobj);
}

int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
                                  struct drm_file *file_priv,
                                  uint32_t *handle)
{
        int r;
        struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;

        BUG_ON(!gobj);
        /* drm_gem_handle_create adds a reference - good */
        r = drm_gem_handle_create(file_priv, gobj, handle);
        if (r)
                return r;
        return 0;
}
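
/*
 * Allocate the GEM object backing the fbdev framebuffer in the surface
 * domain, pin it and kmap it; the pin and mapping last until
 * qxlfb_destroy_pinned_object() undoes them.
 */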
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
                                      struct drm_mode_fb_cmd2 *mode_cmd,
                                      struct drm_gem_object **gobj_p)
{
        struct qxl_device *qdev = qfbdev->qdev;
        struct drm_gem_object *gobj = NULL;
        struct qxl_bo *qbo = NULL;
        int ret;
        int aligned_size, size;
        int height = mode_cmd->height;
        int bpp;
        int depth;

        drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);

        size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        /* TODO: unallocate and reallocate surface0 for real. Hack to just
         * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
        ret = qxl_gem_object_create(qdev, aligned_size, 0,
                                    QXL_GEM_DOMAIN_SURFACE,
                                    false, /* is discardable */
                                    false, /* is kernel (false means device) */
                                    NULL,
                                    &gobj);
        if (ret) {
                pr_err("failed to allocate framebuffer (%d)\n",
                       aligned_size);
                return -ENOMEM;
        }
        qbo = gem_to_qxl_bo(gobj);

        qbo->surf.width = mode_cmd->width;
        qbo->surf.height = mode_cmd->height;
        qbo->surf.stride = mode_cmd->pitches[0];
        qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
        ret = qxl_bo_reserve(qbo, false);
        if (unlikely(ret != 0))
                goto out_unref;
        ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
        if (ret) {
                qxl_bo_unreserve(qbo);
                goto out_unref;
        }
        ret = qxl_bo_kmap(qbo, NULL);
        qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
        if (ret)
                goto out_unref;

        *gobj_p = gobj;
        return 0;
out_unref:
        qxlfb_destroy_pinned_object(gobj);
        *gobj_p = NULL;
        return ret;
}
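
/*
 * .fb_probe backend: create the pinned object for surface 0, allocate a
 * vmalloc'ed shadow buffer for CPU rendering, and fill in an fb_info
 * that uses deferred I/O to push shadow updates to the device.
 */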
static int qxlfb_create(struct qxl_fbdev *qfbdev,
                        struct drm_fb_helper_surface_size *sizes)
{
        struct qxl_device *qdev = qfbdev->qdev;
        struct fb_info *info;
        struct drm_framebuffer *fb = NULL;
        struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_gem_object *gobj = NULL;
        struct qxl_bo *qbo = NULL;
        int ret;
        int size;
        int bpp = sizes->surface_bpp;
        int depth = sizes->surface_depth;
        void *shadow;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;

        mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

        ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
        if (ret < 0)
                return ret;

        qbo = gem_to_qxl_bo(gobj);
        QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
                 mode_cmd.height, mode_cmd.pitches[0]);

        shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
        /* TODO: what's the usual response to memory allocation errors? */
        BUG_ON(!shadow);
        QXL_INFO(qdev,
                 "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
                 qxl_bo_gpu_offset(qbo),
                 qxl_bo_mmap_offset(qbo),
                 qbo->kptr,
                 shadow);
        size = mode_cmd.pitches[0] * mode_cmd.height;

        info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto out_unref;
        }

        info->par = qfbdev;

        qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);

        fb = &qfbdev->qfb.base;

        /* setup helper with fb data */
        qfbdev->helper.fb = fb;

        qfbdev->shadow = shadow;
        strcpy(info->fix.id, "qxldrmfb");

        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

        info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
        info->fbops = &qxlfb_ops;

        /*
         * TODO: using gobj->size in various places in this function. Not sure
         * what the difference between the different sizes is.
         */
        info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
        info->fix.smem_len = gobj->size;
        info->screen_base = qfbdev->shadow;
        info->screen_size = gobj->size;

        drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
                               sizes->fb_height);

        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
        info->apertures->ranges[0].size = qdev->vram_size;

        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;

        if (info->screen_base == NULL) {
                ret = -ENOSPC;
                goto out_destroy_fbi;
        }

        info->fbdefio = &qxl_defio;
        fb_deferred_io_init(info);

        qdev->fbdev_info = info;
        qdev->fbdev_qfb = &qfbdev->qfb;
        DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
        DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
        return 0;

out_destroy_fbi:
        drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
        if (qbo) {
                ret = qxl_bo_reserve(qbo, false);
                if (likely(ret == 0)) {
                        qxl_bo_kunmap(qbo);
                        qxl_bo_unpin(qbo);
                        qxl_bo_unreserve(qbo);
                }
        }
        if (fb && ret) {
                drm_gem_object_unreference(gobj);
                drm_framebuffer_cleanup(fb);
                kfree(fb);
        }
        drm_gem_object_unreference(gobj);
        return ret;
}

static int qxl_fb_find_or_create_single(
                struct drm_fb_helper *helper,
                struct drm_fb_helper_surface_size *sizes)
{
        struct qxl_fbdev *qfbdev =
                container_of(helper, struct qxl_fbdev, helper);
        int new_fb = 0;
        int ret;

        if (!helper->fb) {
                ret = qxlfb_create(qfbdev, sizes);
                if (ret)
                        return ret;
                new_fb = 1;
        }
        return new_fb;
}

static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
        struct qxl_framebuffer *qfb = &qfbdev->qfb;

        drm_fb_helper_unregister_fbi(&qfbdev->helper);
        drm_fb_helper_release_fbi(&qfbdev->helper);

        if (qfb->obj) {
                qxlfb_destroy_pinned_object(qfb->obj);
                qfb->obj = NULL;
        }
        drm_fb_helper_fini(&qfbdev->helper);
        vfree(qfbdev->shadow);
        drm_framebuffer_cleanup(&qfb->base);

        return 0;
}

static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
        .fb_probe = qxl_fb_find_or_create_single,
};
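
/*
 * Create the fbdev emulation at driver load: allocate the qxl_fbdev,
 * hook it up to the DRM fb helper and let the helper pick an initial
 * 32bpp configuration.
 */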
int qxl_fbdev_init(struct qxl_device *qdev)
{
        struct qxl_fbdev *qfbdev;
        int bpp_sel = 32; /* TODO: parameter from somewhere? */
        int ret;

        qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
        if (!qfbdev)
                return -ENOMEM;

        qfbdev->qdev = qdev;
        qdev->mode_info.qfbdev = qfbdev;
        spin_lock_init(&qfbdev->delayed_ops_lock);
        spin_lock_init(&qfbdev->dirty.lock);
        INIT_LIST_HEAD(&qfbdev->delayed_ops);

        drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
                              &qxl_fb_helper_funcs);

        ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
                                 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
                                 QXLFB_CONN_LIMIT);
        if (ret)
                goto free;

        ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
        if (ret)
                goto fini;

        ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
        if (ret)
                goto fini;

        return 0;

fini:
        drm_fb_helper_fini(&qfbdev->helper);
free:
        kfree(qfbdev);
        return ret;
}

void qxl_fbdev_fini(struct qxl_device *qdev)
{
        if (!qdev->mode_info.qfbdev)
                return;

        qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
        kfree(qdev->mode_info.qfbdev);
        qdev->mode_info.qfbdev = NULL;
}

void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
        drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}

bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
{
        if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
                return true;
        return false;
}