/* drivers/gpu/drm/qxl/qxl_fb.c */
  1. /*
  2. * Copyright © 2013 Red Hat
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * David Airlie
  25. */
  26. #include <linux/module.h>
  27. #include <linux/fb.h>
  28. #include "drmP.h"
  29. #include "drm/drm.h"
  30. #include "drm/drm_crtc.h"
  31. #include "drm/drm_crtc_helper.h"
  32. #include "qxl_drv.h"
  33. #include "qxl_object.h"
  34. #include "drm_fb_helper.h"
  35. #define QXL_DIRTY_DELAY (HZ / 30)
/*
 * Per-device fbdev emulation state.  One instance is allocated in
 * qxl_fbdev_init() and stored in qdev->mode_info.qfbdev.
 */
struct qxl_fbdev {
	struct drm_fb_helper helper;	/* generic DRM fbdev helper state */
	struct qxl_framebuffer qfb;	/* framebuffer wrapping the pinned BO */
	struct qxl_device *qdev;	/* owning device */

	spinlock_t delayed_ops_lock;	/* protects delayed_ops */
	struct list_head delayed_ops;

	void *shadow;	/* vmalloc'ed draw buffer; fbdev ops render here and
			 * qxl_fb_dirty_flush() copies damage to the device */
	int size;

	/* dirty memory logging */
	struct {
		spinlock_t lock;	/* protects the rectangle below */
		/* accumulated damage rectangle, inclusive coordinates */
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
  53. static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
  54. struct qxl_device *qdev, struct fb_info *info,
  55. const struct fb_image *image)
  56. {
  57. qxl_fb_image->qdev = qdev;
  58. if (info) {
  59. qxl_fb_image->visual = info->fix.visual;
  60. if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
  61. qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
  62. memcpy(&qxl_fb_image->pseudo_palette,
  63. info->pseudo_palette,
  64. sizeof(qxl_fb_image->pseudo_palette));
  65. } else {
  66. /* fallback */
  67. if (image->depth == 1)
  68. qxl_fb_image->visual = FB_VISUAL_MONO10;
  69. else
  70. qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
  71. }
  72. if (image) {
  73. memcpy(&qxl_fb_image->fb_image, image,
  74. sizeof(qxl_fb_image->fb_image));
  75. }
  76. }
/*
 * Flush the accumulated dirty rectangle: snapshot and reset it under
 * the lock, then push the matching part of the shadow buffer to the
 * device as one opaque blit.  Runs from the fb_work worker
 * (qxl_fb_work()).
 */
static void qxl_fb_dirty_flush(struct fb_info *info)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;
	struct qxl_fb_image qxl_fb_image;
	struct fb_image *image = &qxl_fb_image.fb_image;
	unsigned long flags;
	u32 x1, x2, y1, y2;

	/* TODO: hard coding 32 bpp */
	int stride = qfbdev->qfb.base.pitches[0];

	/* Atomically take and clear the pending damage rectangle. */
	spin_lock_irqsave(&qfbdev->dirty.lock, flags);
	x1 = qfbdev->dirty.x1;
	x2 = qfbdev->dirty.x2;
	y1 = qfbdev->dirty.y1;
	y2 = qfbdev->dirty.y2;
	qfbdev->dirty.x1 = 0;
	qfbdev->dirty.x2 = 0;
	qfbdev->dirty.y1 = 0;
	qfbdev->dirty.y2 = 0;
	spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);

	/*
	 * we are using a shadow draw buffer, at qdev->surface0_shadow
	 */
	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);

	/* Coordinates are inclusive (see qxl_dirty_update), hence +1. */
	image->dx = x1;
	image->dy = y1;
	image->width = x2 - x1 + 1;
	image->height = y2 - y1 + 1;
	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
					 warnings */
	image->bg_color = 0;
	image->depth = 32; /* TODO: take from somewhere? */
	image->cmap.start = 0;
	image->cmap.len = 0;
	image->cmap.red = NULL;
	image->cmap.green = NULL;
	image->cmap.blue = NULL;
	image->cmap.transp = NULL;
	/* start of the dirty region inside the 32bpp shadow buffer */
	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);

	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
	qxl_draw_opaque_fb(&qxl_fb_image, stride);
}
  119. static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
  120. int x, int y, int width, int height)
  121. {
  122. struct qxl_device *qdev = qfbdev->qdev;
  123. unsigned long flags;
  124. int x2, y2;
  125. x2 = x + width - 1;
  126. y2 = y + height - 1;
  127. spin_lock_irqsave(&qfbdev->dirty.lock, flags);
  128. if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
  129. (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
  130. if (qfbdev->dirty.y1 < y)
  131. y = qfbdev->dirty.y1;
  132. if (qfbdev->dirty.y2 > y2)
  133. y2 = qfbdev->dirty.y2;
  134. if (qfbdev->dirty.x1 < x)
  135. x = qfbdev->dirty.x1;
  136. if (qfbdev->dirty.x2 > x2)
  137. x2 = qfbdev->dirty.x2;
  138. }
  139. qfbdev->dirty.x1 = x;
  140. qfbdev->dirty.x2 = x2;
  141. qfbdev->dirty.y1 = y;
  142. qfbdev->dirty.y2 = y2;
  143. spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
  144. schedule_work(&qdev->fb_work);
  145. }
  146. static void qxl_deferred_io(struct fb_info *info,
  147. struct list_head *pagelist)
  148. {
  149. struct qxl_fbdev *qfbdev = info->par;
  150. unsigned long start, end, min, max;
  151. struct page *page;
  152. int y1, y2;
  153. min = ULONG_MAX;
  154. max = 0;
  155. list_for_each_entry(page, pagelist, lru) {
  156. start = page->index << PAGE_SHIFT;
  157. end = start + PAGE_SIZE - 1;
  158. min = min(min, start);
  159. max = max(max, end);
  160. }
  161. if (min < max) {
  162. y1 = min / info->fix.line_length;
  163. y2 = (max / info->fix.line_length) + 1;
  164. qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
  165. }
  166. };
/* Deferred-I/O parameters: flush damage at most every QXL_DIRTY_DELAY. */
static struct fb_deferred_io qxl_defio = {
	.delay = QXL_DIRTY_DELAY,
	.deferred_io = qxl_deferred_io,
};
  171. static void qxl_fb_fillrect(struct fb_info *info,
  172. const struct fb_fillrect *rect)
  173. {
  174. struct qxl_fbdev *qfbdev = info->par;
  175. drm_fb_helper_sys_fillrect(info, rect);
  176. qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
  177. rect->height);
  178. }
  179. static void qxl_fb_copyarea(struct fb_info *info,
  180. const struct fb_copyarea *area)
  181. {
  182. struct qxl_fbdev *qfbdev = info->par;
  183. drm_fb_helper_sys_copyarea(info, area);
  184. qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
  185. area->height);
  186. }
  187. static void qxl_fb_imageblit(struct fb_info *info,
  188. const struct fb_image *image)
  189. {
  190. struct qxl_fbdev *qfbdev = info->par;
  191. drm_fb_helper_sys_imageblit(info, image);
  192. qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
  193. image->height);
  194. }
/*
 * Worker scheduled by qxl_dirty_update(); flushes the accumulated
 * dirty rectangle to the device outside of atomic context.
 */
static void qxl_fb_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
	struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;

	qxl_fb_dirty_flush(qfbdev->helper.fbdev);
}
/* One-time setup of the dirty-flush worker.  Always succeeds. */
int qxl_fb_init(struct qxl_device *qdev)
{
	INIT_WORK(&qdev->fb_work, qxl_fb_work);
	return 0;
}
/*
 * fbdev entry points.  The drawing ops (fillrect/copyarea/imageblit)
 * wrap the generic sys_* helpers so damage gets tracked and flushed to
 * the device; everything else is delegated to the DRM fb helper.
 */
static struct fb_ops qxlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
	.fb_fillrect = qxl_fb_fillrect,
	.fb_copyarea = qxl_fb_copyarea,
	.fb_imageblit = qxl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
  219. static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
  220. {
  221. struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
  222. int ret;
  223. ret = qxl_bo_reserve(qbo, false);
  224. if (likely(ret == 0)) {
  225. qxl_bo_kunmap(qbo);
  226. qxl_bo_unpin(qbo);
  227. qxl_bo_unreserve(qbo);
  228. }
  229. drm_gem_object_unreference_unlocked(gobj);
  230. }
  231. int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
  232. struct drm_file *file_priv,
  233. uint32_t *handle)
  234. {
  235. int r;
  236. struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
  237. BUG_ON(!gobj);
  238. /* drm_get_handle_create adds a reference - good */
  239. r = drm_gem_handle_create(file_priv, gobj, handle);
  240. if (r)
  241. return r;
  242. return 0;
  243. }
  244. static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
  245. const struct drm_mode_fb_cmd2 *mode_cmd,
  246. struct drm_gem_object **gobj_p)
  247. {
  248. struct qxl_device *qdev = qfbdev->qdev;
  249. struct drm_gem_object *gobj = NULL;
  250. struct qxl_bo *qbo = NULL;
  251. int ret;
  252. int aligned_size, size;
  253. int height = mode_cmd->height;
  254. int bpp;
  255. int depth;
  256. drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
  257. size = mode_cmd->pitches[0] * height;
  258. aligned_size = ALIGN(size, PAGE_SIZE);
  259. /* TODO: unallocate and reallocate surface0 for real. Hack to just
  260. * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
  261. ret = qxl_gem_object_create(qdev, aligned_size, 0,
  262. QXL_GEM_DOMAIN_SURFACE,
  263. false, /* is discardable */
  264. false, /* is kernel (false means device) */
  265. NULL,
  266. &gobj);
  267. if (ret) {
  268. pr_err("failed to allocate framebuffer (%d)\n",
  269. aligned_size);
  270. return -ENOMEM;
  271. }
  272. qbo = gem_to_qxl_bo(gobj);
  273. qbo->surf.width = mode_cmd->width;
  274. qbo->surf.height = mode_cmd->height;
  275. qbo->surf.stride = mode_cmd->pitches[0];
  276. qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
  277. ret = qxl_bo_reserve(qbo, false);
  278. if (unlikely(ret != 0))
  279. goto out_unref;
  280. ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
  281. if (ret) {
  282. qxl_bo_unreserve(qbo);
  283. goto out_unref;
  284. }
  285. ret = qxl_bo_kmap(qbo, NULL);
  286. qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
  287. if (ret)
  288. goto out_unref;
  289. *gobj_p = gobj;
  290. return 0;
  291. out_unref:
  292. qxlfb_destroy_pinned_object(gobj);
  293. *gobj_p = NULL;
  294. return ret;
  295. }
  296. static int qxlfb_create(struct qxl_fbdev *qfbdev,
  297. struct drm_fb_helper_surface_size *sizes)
  298. {
  299. struct qxl_device *qdev = qfbdev->qdev;
  300. struct fb_info *info;
  301. struct drm_framebuffer *fb = NULL;
  302. struct drm_mode_fb_cmd2 mode_cmd;
  303. struct drm_gem_object *gobj = NULL;
  304. struct qxl_bo *qbo = NULL;
  305. int ret;
  306. int size;
  307. int bpp = sizes->surface_bpp;
  308. int depth = sizes->surface_depth;
  309. void *shadow;
  310. mode_cmd.width = sizes->surface_width;
  311. mode_cmd.height = sizes->surface_height;
  312. mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
  313. mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
  314. ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
  315. qbo = gem_to_qxl_bo(gobj);
  316. QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
  317. mode_cmd.height, mode_cmd.pitches[0]);
  318. shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
  319. /* TODO: what's the usual response to memory allocation errors? */
  320. BUG_ON(!shadow);
  321. QXL_INFO(qdev,
  322. "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
  323. qxl_bo_gpu_offset(qbo),
  324. qxl_bo_mmap_offset(qbo),
  325. qbo->kptr,
  326. shadow);
  327. size = mode_cmd.pitches[0] * mode_cmd.height;
  328. info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
  329. if (IS_ERR(info)) {
  330. ret = PTR_ERR(info);
  331. goto out_unref;
  332. }
  333. info->par = qfbdev;
  334. qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
  335. fb = &qfbdev->qfb.base;
  336. /* setup helper with fb data */
  337. qfbdev->helper.fb = fb;
  338. qfbdev->shadow = shadow;
  339. strcpy(info->fix.id, "qxldrmfb");
  340. drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
  341. info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
  342. info->fbops = &qxlfb_ops;
  343. /*
  344. * TODO: using gobj->size in various places in this function. Not sure
  345. * what the difference between the different sizes is.
  346. */
  347. info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
  348. info->fix.smem_len = gobj->size;
  349. info->screen_base = qfbdev->shadow;
  350. info->screen_size = gobj->size;
  351. drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
  352. sizes->fb_height);
  353. /* setup aperture base/size for vesafb takeover */
  354. info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
  355. info->apertures->ranges[0].size = qdev->vram_size;
  356. info->fix.mmio_start = 0;
  357. info->fix.mmio_len = 0;
  358. if (info->screen_base == NULL) {
  359. ret = -ENOSPC;
  360. goto out_destroy_fbi;
  361. }
  362. info->fbdefio = &qxl_defio;
  363. fb_deferred_io_init(info);
  364. qdev->fbdev_info = info;
  365. qdev->fbdev_qfb = &qfbdev->qfb;
  366. DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
  367. DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
  368. return 0;
  369. out_destroy_fbi:
  370. drm_fb_helper_release_fbi(&qfbdev->helper);
  371. out_unref:
  372. if (qbo) {
  373. ret = qxl_bo_reserve(qbo, false);
  374. if (likely(ret == 0)) {
  375. qxl_bo_kunmap(qbo);
  376. qxl_bo_unpin(qbo);
  377. qxl_bo_unreserve(qbo);
  378. }
  379. }
  380. if (fb && ret) {
  381. drm_gem_object_unreference(gobj);
  382. drm_framebuffer_cleanup(fb);
  383. kfree(fb);
  384. }
  385. drm_gem_object_unreference(gobj);
  386. return ret;
  387. }
  388. static int qxl_fb_find_or_create_single(
  389. struct drm_fb_helper *helper,
  390. struct drm_fb_helper_surface_size *sizes)
  391. {
  392. struct qxl_fbdev *qfbdev =
  393. container_of(helper, struct qxl_fbdev, helper);
  394. int new_fb = 0;
  395. int ret;
  396. if (!helper->fb) {
  397. ret = qxlfb_create(qfbdev, sizes);
  398. if (ret)
  399. return ret;
  400. new_fb = 1;
  401. }
  402. return new_fb;
  403. }
/*
 * Tear down everything qxlfb_create() set up: unregister and release
 * the fb_info, drop the pinned surface BO, finalize the helper, free
 * the shadow buffer and clean up the framebuffer.  Always returns 0.
 */
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
	struct qxl_framebuffer *qfb = &qfbdev->qfb;

	drm_fb_helper_unregister_fbi(&qfbdev->helper);
	drm_fb_helper_release_fbi(&qfbdev->helper);

	if (qfb->obj) {
		qxlfb_destroy_pinned_object(qfb->obj);
		qfb->obj = NULL;
	}
	drm_fb_helper_fini(&qfbdev->helper);
	vfree(qfbdev->shadow);
	drm_framebuffer_cleanup(&qfb->base);

	return 0;
}
static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
	/* invoked by the fb helper to (re)create the fbdev framebuffer */
	.fb_probe = qxl_fb_find_or_create_single,
};
  421. int qxl_fbdev_init(struct qxl_device *qdev)
  422. {
  423. struct qxl_fbdev *qfbdev;
  424. int bpp_sel = 32; /* TODO: parameter from somewhere? */
  425. int ret;
  426. qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
  427. if (!qfbdev)
  428. return -ENOMEM;
  429. qfbdev->qdev = qdev;
  430. qdev->mode_info.qfbdev = qfbdev;
  431. spin_lock_init(&qfbdev->delayed_ops_lock);
  432. spin_lock_init(&qfbdev->dirty.lock);
  433. INIT_LIST_HEAD(&qfbdev->delayed_ops);
  434. drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
  435. &qxl_fb_helper_funcs);
  436. ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
  437. qxl_num_crtc /* num_crtc - QXL supports just 1 */,
  438. QXLFB_CONN_LIMIT);
  439. if (ret)
  440. goto free;
  441. ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
  442. if (ret)
  443. goto fini;
  444. ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
  445. if (ret)
  446. goto fini;
  447. return 0;
  448. fini:
  449. drm_fb_helper_fini(&qfbdev->helper);
  450. free:
  451. kfree(qfbdev);
  452. return ret;
  453. }
  454. void qxl_fbdev_fini(struct qxl_device *qdev)
  455. {
  456. if (!qdev->mode_info.qfbdev)
  457. return;
  458. qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
  459. kfree(qdev->mode_info.qfbdev);
  460. qdev->mode_info.qfbdev = NULL;
  461. }
/* Forward fbdev suspend/resume state to the DRM fb helper. */
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
	drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}
  466. bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
  467. {
  468. if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
  469. return true;
  470. return false;
  471. }