qxl_display.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268
  1. /*
  2. * Copyright 2013 Red Hat Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Dave Airlie
  23. * Alon Levy
  24. */
  25. #include <linux/crc32.h>
  26. #include <drm/drm_crtc_helper.h>
  27. #include <drm/drm_plane_helper.h>
  28. #include <drm/drm_atomic_helper.h>
  29. #include <drm/drm_atomic.h>
  30. #include "qxl_drv.h"
  31. #include "qxl_object.h"
  32. static bool qxl_head_enabled(struct qxl_head *head)
  33. {
  34. return head->width && head->height;
  35. }
  36. static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
  37. {
  38. if (qdev->client_monitors_config &&
  39. count > qdev->client_monitors_config->count) {
  40. kfree(qdev->client_monitors_config);
  41. qdev->client_monitors_config = NULL;
  42. }
  43. if (!qdev->client_monitors_config) {
  44. qdev->client_monitors_config = kzalloc(
  45. sizeof(struct qxl_monitors_config) +
  46. sizeof(struct qxl_head) * count, GFP_KERNEL);
  47. if (!qdev->client_monitors_config) {
  48. qxl_io_log(qdev,
  49. "%s: allocation failure for %u heads\n",
  50. __func__, count);
  51. return;
  52. }
  53. }
  54. qdev->client_monitors_config->count = count;
  55. }
/* Result of copying the client monitors config out of the rom area. */
enum {
	MONITORS_CONFIG_MODIFIED,	/* config differs from our last copy */
	MONITORS_CONFIG_UNCHANGED,	/* config matches what we already have */
	MONITORS_CONFIG_BAD_CRC,	/* rom checksum mismatch; caller retries */
};
  61. static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
  62. {
  63. int i;
  64. int num_monitors;
  65. uint32_t crc;
  66. int status = MONITORS_CONFIG_UNCHANGED;
  67. num_monitors = qdev->rom->client_monitors_config.count;
  68. crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
  69. sizeof(qdev->rom->client_monitors_config));
  70. if (crc != qdev->rom->client_monitors_config_crc) {
  71. qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
  72. sizeof(qdev->rom->client_monitors_config),
  73. qdev->rom->client_monitors_config_crc);
  74. return MONITORS_CONFIG_BAD_CRC;
  75. }
  76. if (!num_monitors) {
  77. DRM_DEBUG_KMS("no client monitors configured\n");
  78. return status;
  79. }
  80. if (num_monitors > qdev->monitors_config->max_allowed) {
  81. DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
  82. qdev->monitors_config->max_allowed, num_monitors);
  83. num_monitors = qdev->monitors_config->max_allowed;
  84. } else {
  85. num_monitors = qdev->rom->client_monitors_config.count;
  86. }
  87. if (qdev->client_monitors_config
  88. && (num_monitors != qdev->client_monitors_config->count)) {
  89. status = MONITORS_CONFIG_MODIFIED;
  90. }
  91. qxl_alloc_client_monitors_config(qdev, num_monitors);
  92. /* we copy max from the client but it isn't used */
  93. qdev->client_monitors_config->max_allowed =
  94. qdev->monitors_config->max_allowed;
  95. for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
  96. struct qxl_urect *c_rect =
  97. &qdev->rom->client_monitors_config.heads[i];
  98. struct qxl_head *client_head =
  99. &qdev->client_monitors_config->heads[i];
  100. if (client_head->x != c_rect->left) {
  101. client_head->x = c_rect->left;
  102. status = MONITORS_CONFIG_MODIFIED;
  103. }
  104. if (client_head->y != c_rect->top) {
  105. client_head->y = c_rect->top;
  106. status = MONITORS_CONFIG_MODIFIED;
  107. }
  108. if (client_head->width != c_rect->right - c_rect->left) {
  109. client_head->width = c_rect->right - c_rect->left;
  110. status = MONITORS_CONFIG_MODIFIED;
  111. }
  112. if (client_head->height != c_rect->bottom - c_rect->top) {
  113. client_head->height = c_rect->bottom - c_rect->top;
  114. status = MONITORS_CONFIG_MODIFIED;
  115. }
  116. if (client_head->surface_id != 0) {
  117. client_head->surface_id = 0;
  118. status = MONITORS_CONFIG_MODIFIED;
  119. }
  120. if (client_head->id != i) {
  121. client_head->id = i;
  122. status = MONITORS_CONFIG_MODIFIED;
  123. }
  124. if (client_head->flags != 0) {
  125. client_head->flags = 0;
  126. status = MONITORS_CONFIG_MODIFIED;
  127. }
  128. DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
  129. client_head->x, client_head->y);
  130. }
  131. return status;
  132. }
  133. static void qxl_update_offset_props(struct qxl_device *qdev)
  134. {
  135. struct drm_device *dev = &qdev->ddev;
  136. struct drm_connector *connector;
  137. struct qxl_output *output;
  138. struct qxl_head *head;
  139. list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
  140. output = drm_connector_to_qxl_output(connector);
  141. head = &qdev->client_monitors_config->heads[output->index];
  142. drm_object_property_set_value(&connector->base,
  143. dev->mode_config.suggested_x_property, head->x);
  144. drm_object_property_set_value(&connector->base,
  145. dev->mode_config.suggested_y_property, head->y);
  146. }
  147. }
  148. void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
  149. {
  150. struct drm_device *dev = &qdev->ddev;
  151. int status, retries;
  152. for (retries = 0; retries < 10; retries++) {
  153. status = qxl_display_copy_rom_client_monitors_config(qdev);
  154. if (status != MONITORS_CONFIG_BAD_CRC)
  155. break;
  156. udelay(5);
  157. }
  158. if (status == MONITORS_CONFIG_BAD_CRC) {
  159. qxl_io_log(qdev, "config: bad crc\n");
  160. DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
  161. return;
  162. }
  163. if (status == MONITORS_CONFIG_UNCHANGED) {
  164. qxl_io_log(qdev, "config: unchanged\n");
  165. DRM_DEBUG_KMS("ignoring client monitors config: unchanged");
  166. return;
  167. }
  168. drm_modeset_lock_all(dev);
  169. qxl_update_offset_props(qdev);
  170. drm_modeset_unlock_all(dev);
  171. if (!drm_helper_hpd_irq_event(dev)) {
  172. /* notify that the monitor configuration changed, to
  173. adjust at the arbitrary resolution */
  174. drm_kms_helper_hotplug_event(dev);
  175. }
  176. }
  177. static int qxl_add_monitors_config_modes(struct drm_connector *connector,
  178. unsigned *pwidth,
  179. unsigned *pheight)
  180. {
  181. struct drm_device *dev = connector->dev;
  182. struct qxl_device *qdev = dev->dev_private;
  183. struct qxl_output *output = drm_connector_to_qxl_output(connector);
  184. int h = output->index;
  185. struct drm_display_mode *mode = NULL;
  186. struct qxl_head *head;
  187. if (!qdev->monitors_config)
  188. return 0;
  189. if (h >= qdev->monitors_config->max_allowed)
  190. return 0;
  191. if (!qdev->client_monitors_config)
  192. return 0;
  193. if (h >= qdev->client_monitors_config->count)
  194. return 0;
  195. head = &qdev->client_monitors_config->heads[h];
  196. DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height);
  197. mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
  198. false);
  199. mode->type |= DRM_MODE_TYPE_PREFERRED;
  200. mode->hdisplay = head->width;
  201. mode->vdisplay = head->height;
  202. drm_mode_set_name(mode);
  203. *pwidth = head->width;
  204. *pheight = head->height;
  205. drm_mode_probed_add(connector, mode);
  206. /* remember the last custom size for mode validation */
  207. qdev->monitors_config_width = mode->hdisplay;
  208. qdev->monitors_config_height = mode->vdisplay;
  209. return 1;
  210. }
/* Standard resolutions offered on every connector in addition to the
 * client-requested custom mode. */
static struct mode_size {
	int w;	/* horizontal resolution in pixels */
	int h;	/* vertical resolution in pixels */
} common_modes[] = {
	{ 640, 480},
	{ 720, 480},
	{ 800, 600},
	{ 848, 480},
	{1024, 768},
	{1152, 768},
	{1280, 720},
	{1280, 800},
	{1280, 854},
	{1280, 960},
	{1280, 1024},
	{1440, 900},
	{1400, 1050},
	{1680, 1050},
	{1600, 1200},
	{1920, 1080},
	{1920, 1200}
};
  233. static int qxl_add_common_modes(struct drm_connector *connector,
  234. unsigned pwidth,
  235. unsigned pheight)
  236. {
  237. struct drm_device *dev = connector->dev;
  238. struct drm_display_mode *mode = NULL;
  239. int i;
  240. for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
  241. mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
  242. 60, false, false, false);
  243. if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
  244. mode->type |= DRM_MODE_TYPE_PREFERRED;
  245. drm_mode_probed_add(connector, mode);
  246. }
  247. return i - 1;
  248. }
  249. static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
  250. struct drm_crtc_state *old_crtc_state)
  251. {
  252. struct drm_device *dev = crtc->dev;
  253. struct drm_pending_vblank_event *event;
  254. unsigned long flags;
  255. if (crtc->state && crtc->state->event) {
  256. event = crtc->state->event;
  257. crtc->state->event = NULL;
  258. spin_lock_irqsave(&dev->event_lock, flags);
  259. drm_crtc_send_vblank_event(crtc, event);
  260. spin_unlock_irqrestore(&dev->event_lock, flags);
  261. }
  262. }
  263. static void qxl_crtc_destroy(struct drm_crtc *crtc)
  264. {
  265. struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
  266. drm_crtc_cleanup(crtc);
  267. kfree(qxl_crtc);
  268. }
/* CRTC vtable: atomic helpers do the heavy lifting, only destroy is ours. */
static const struct drm_crtc_funcs qxl_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = qxl_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
  277. void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
  278. {
  279. struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
  280. struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
  281. WARN_ON(bo->shadow);
  282. drm_gem_object_unreference_unlocked(qxl_fb->obj);
  283. drm_framebuffer_cleanup(fb);
  284. kfree(qxl_fb);
  285. }
  286. static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
  287. struct drm_file *file_priv,
  288. unsigned flags, unsigned color,
  289. struct drm_clip_rect *clips,
  290. unsigned num_clips)
  291. {
  292. /* TODO: vmwgfx where this was cribbed from had locking. Why? */
  293. struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
  294. struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
  295. struct drm_clip_rect norect;
  296. struct qxl_bo *qobj;
  297. int inc = 1;
  298. drm_modeset_lock_all(fb->dev);
  299. qobj = gem_to_qxl_bo(qxl_fb->obj);
  300. /* if we aren't primary surface ignore this */
  301. if (!qobj->is_primary) {
  302. drm_modeset_unlock_all(fb->dev);
  303. return 0;
  304. }
  305. if (!num_clips) {
  306. num_clips = 1;
  307. clips = &norect;
  308. norect.x1 = norect.y1 = 0;
  309. norect.x2 = fb->width;
  310. norect.y2 = fb->height;
  311. } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
  312. num_clips /= 2;
  313. inc = 2; /* skip source rects */
  314. }
  315. qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
  316. clips, num_clips, inc);
  317. drm_modeset_unlock_all(fb->dev);
  318. return 0;
  319. }
/* Framebuffer vtable for user-created framebuffers. */
static const struct drm_framebuffer_funcs qxl_fb_funcs = {
	.destroy = qxl_user_framebuffer_destroy,
	.dirty = qxl_framebuffer_surface_dirty,
	/* TODO?
	 * .create_handle = qxl_user_framebuffer_create_handle, */
};
  326. int
  327. qxl_framebuffer_init(struct drm_device *dev,
  328. struct qxl_framebuffer *qfb,
  329. const struct drm_mode_fb_cmd2 *mode_cmd,
  330. struct drm_gem_object *obj,
  331. const struct drm_framebuffer_funcs *funcs)
  332. {
  333. int ret;
  334. qfb->obj = obj;
  335. drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd);
  336. ret = drm_framebuffer_init(dev, &qfb->base, funcs);
  337. if (ret) {
  338. qfb->obj = NULL;
  339. return ret;
  340. }
  341. return 0;
  342. }
  343. static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
  344. const struct drm_display_mode *mode,
  345. struct drm_display_mode *adjusted_mode)
  346. {
  347. struct drm_device *dev = crtc->dev;
  348. struct qxl_device *qdev = dev->dev_private;
  349. qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
  350. __func__,
  351. mode->hdisplay, mode->vdisplay,
  352. adjusted_mode->hdisplay,
  353. adjusted_mode->vdisplay);
  354. return true;
  355. }
  356. static void
  357. qxl_send_monitors_config(struct qxl_device *qdev)
  358. {
  359. int i;
  360. BUG_ON(!qdev->ram_header->monitors_config);
  361. if (qdev->monitors_config->count == 0) {
  362. qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
  363. return;
  364. }
  365. for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
  366. struct qxl_head *head = &qdev->monitors_config->heads[i];
  367. if (head->y > 8192 || head->x > 8192 ||
  368. head->width > 8192 || head->height > 8192) {
  369. DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
  370. i, head->width, head->height,
  371. head->x, head->y);
  372. return;
  373. }
  374. }
  375. qxl_io_monitors_config(qdev);
  376. }
  377. static void qxl_monitors_config_set(struct qxl_device *qdev,
  378. int index,
  379. unsigned x, unsigned y,
  380. unsigned width, unsigned height,
  381. unsigned surf_id)
  382. {
  383. DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y);
  384. qdev->monitors_config->heads[index].x = x;
  385. qdev->monitors_config->heads[index].y = y;
  386. qdev->monitors_config->heads[index].width = width;
  387. qdev->monitors_config->heads[index].height = height;
  388. qdev->monitors_config->heads[index].surface_id = surf_id;
  389. }
  390. static void qxl_mode_set_nofb(struct drm_crtc *crtc)
  391. {
  392. struct qxl_device *qdev = crtc->dev->dev_private;
  393. struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
  394. struct drm_display_mode *mode = &crtc->mode;
  395. DRM_DEBUG("Mode set (%d,%d)\n",
  396. mode->hdisplay, mode->vdisplay);
  397. qxl_monitors_config_set(qdev, qcrtc->index, 0, 0,
  398. mode->hdisplay, mode->vdisplay, 0);
  399. }
/* No per-crtc enable step is needed; kept only for the debug trace. */
static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	DRM_DEBUG("\n");
}
  405. static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
  406. struct drm_crtc_state *old_state)
  407. {
  408. struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
  409. struct qxl_device *qdev = crtc->dev->dev_private;
  410. qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0);
  411. qxl_send_monitors_config(qdev);
  412. }
/* Atomic helper callbacks for the crtc. */
static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
	.mode_fixup = qxl_crtc_mode_fixup,
	.mode_set_nofb = qxl_mode_set_nofb,
	.atomic_flush = qxl_crtc_atomic_flush,
	.atomic_enable = qxl_crtc_atomic_enable,
	.atomic_disable = qxl_crtc_atomic_disable,
};
  420. static int qxl_primary_atomic_check(struct drm_plane *plane,
  421. struct drm_plane_state *state)
  422. {
  423. struct qxl_device *qdev = plane->dev->dev_private;
  424. struct qxl_framebuffer *qfb;
  425. struct qxl_bo *bo;
  426. if (!state->crtc || !state->fb)
  427. return 0;
  428. qfb = to_qxl_framebuffer(state->fb);
  429. bo = gem_to_qxl_bo(qfb->obj);
  430. if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
  431. DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
  432. return -EINVAL;
  433. }
  434. return 0;
  435. }
/*
 * Make the new plane state's bo the device primary surface (destroying
 * the old primary if it changed) and repaint the whole framebuffer.
 */
static void qxl_primary_atomic_update(struct drm_plane *plane,
				      struct drm_plane_state *old_state)
{
	struct qxl_device *qdev = plane->dev->dev_private;
	struct qxl_framebuffer *qfb =
		to_qxl_framebuffer(plane->state->fb);
	struct qxl_framebuffer *qfb_old;
	struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
	struct qxl_bo *bo_old;
	/* full-framebuffer dirty rect used for the final repaint */
	struct drm_clip_rect norect = {
	    .x1 = 0,
	    .y1 = 0,
	    .x2 = qfb->base.width,
	    .y2 = qfb->base.height
	};
	bool same_shadow = false;

	if (old_state->fb) {
		qfb_old = to_qxl_framebuffer(old_state->fb);
		bo_old = gem_to_qxl_bo(qfb_old->obj);
	} else {
		bo_old = NULL;
	}

	/* same backing bo: nothing changed, nothing to redraw */
	if (bo == bo_old)
		return;

	/* When old and new bo share one shadow bo, the surface the device
	 * scans out does not actually move, so the destroy/create pair
	 * below is skipped and only the primary flags are updated. */
	if (bo_old && bo_old->shadow && bo->shadow &&
	    bo_old->shadow == bo->shadow) {
		same_shadow = true;
	}

	if (bo_old && bo_old->is_primary) {
		if (!same_shadow)
			qxl_io_destroy_primary(qdev);
		bo_old->is_primary = false;
	}

	if (!bo->is_primary) {
		if (!same_shadow)
			qxl_io_create_primary(qdev, 0, bo);
		bo->is_primary = true;
	}

	/* repaint everything on the new primary */
	qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
}
  476. static void qxl_primary_atomic_disable(struct drm_plane *plane,
  477. struct drm_plane_state *old_state)
  478. {
  479. struct qxl_device *qdev = plane->dev->dev_private;
  480. if (old_state->fb) {
  481. struct qxl_framebuffer *qfb =
  482. to_qxl_framebuffer(old_state->fb);
  483. struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
  484. if (bo->is_primary) {
  485. qxl_io_destroy_primary(qdev);
  486. bo->is_primary = false;
  487. }
  488. }
  489. }
/* Cursor plane check: no constraints to validate, always succeeds. */
static int qxl_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	return 0;
}
  495. static void qxl_cursor_atomic_update(struct drm_plane *plane,
  496. struct drm_plane_state *old_state)
  497. {
  498. struct drm_device *dev = plane->dev;
  499. struct qxl_device *qdev = dev->dev_private;
  500. struct drm_framebuffer *fb = plane->state->fb;
  501. struct qxl_release *release;
  502. struct qxl_cursor_cmd *cmd;
  503. struct qxl_cursor *cursor;
  504. struct drm_gem_object *obj;
  505. struct qxl_bo *cursor_bo, *user_bo = NULL;
  506. int ret;
  507. void *user_ptr;
  508. int size = 64*64*4;
  509. ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
  510. QXL_RELEASE_CURSOR_CMD,
  511. &release, NULL);
  512. if (ret)
  513. return;
  514. if (fb != old_state->fb) {
  515. obj = to_qxl_framebuffer(fb)->obj;
  516. user_bo = gem_to_qxl_bo(obj);
  517. /* pinning is done in the prepare/cleanup framevbuffer */
  518. ret = qxl_bo_kmap(user_bo, &user_ptr);
  519. if (ret)
  520. goto out_free_release;
  521. ret = qxl_alloc_bo_reserved(qdev, release,
  522. sizeof(struct qxl_cursor) + size,
  523. &cursor_bo);
  524. if (ret)
  525. goto out_kunmap;
  526. ret = qxl_release_reserve_list(release, true);
  527. if (ret)
  528. goto out_free_bo;
  529. ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
  530. if (ret)
  531. goto out_backoff;
  532. cursor->header.unique = 0;
  533. cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
  534. cursor->header.width = 64;
  535. cursor->header.height = 64;
  536. cursor->header.hot_spot_x = fb->hot_x;
  537. cursor->header.hot_spot_y = fb->hot_y;
  538. cursor->data_size = size;
  539. cursor->chunk.next_chunk = 0;
  540. cursor->chunk.prev_chunk = 0;
  541. cursor->chunk.data_size = size;
  542. memcpy(cursor->chunk.data, user_ptr, size);
  543. qxl_bo_kunmap(cursor_bo);
  544. qxl_bo_kunmap(user_bo);
  545. cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
  546. cmd->u.set.visible = 1;
  547. cmd->u.set.shape = qxl_bo_physical_address(qdev,
  548. cursor_bo, 0);
  549. cmd->type = QXL_CURSOR_SET;
  550. } else {
  551. ret = qxl_release_reserve_list(release, true);
  552. if (ret)
  553. goto out_free_release;
  554. cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
  555. cmd->type = QXL_CURSOR_MOVE;
  556. }
  557. cmd->u.position.x = plane->state->crtc_x + fb->hot_x;
  558. cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
  559. qxl_release_unmap(qdev, release, &cmd->release_info);
  560. qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
  561. qxl_release_fence_buffer_objects(release);
  562. return;
  563. out_backoff:
  564. qxl_release_backoff_reserve_list(release);
  565. out_free_bo:
  566. qxl_bo_unref(&cursor_bo);
  567. out_kunmap:
  568. qxl_bo_kunmap(user_bo);
  569. out_free_release:
  570. qxl_release_free(qdev, release);
  571. return;
  572. }
  573. static void qxl_cursor_atomic_disable(struct drm_plane *plane,
  574. struct drm_plane_state *old_state)
  575. {
  576. struct qxl_device *qdev = plane->dev->dev_private;
  577. struct qxl_release *release;
  578. struct qxl_cursor_cmd *cmd;
  579. int ret;
  580. ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
  581. QXL_RELEASE_CURSOR_CMD,
  582. &release, NULL);
  583. if (ret)
  584. return;
  585. ret = qxl_release_reserve_list(release, true);
  586. if (ret) {
  587. qxl_release_free(qdev, release);
  588. return;
  589. }
  590. cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
  591. cmd->type = QXL_CURSOR_HIDE;
  592. qxl_release_unmap(qdev, release, &cmd->release_info);
  593. qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
  594. qxl_release_fence_buffer_objects(release);
  595. }
/*
 * Pin the new state's backing bo before the commit.  For dumb primary
 * framebuffers also ensure a shadow bo exists, reusing the previous
 * shadow when the update is a plain page-flip (everything identical
 * except the fb itself).
 */
static int qxl_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	struct qxl_device *qdev = plane->dev->dev_private;
	struct drm_gem_object *obj;
	struct qxl_bo *user_bo, *old_bo = NULL;
	int ret;

	if (!new_state->fb)
		return 0;

	obj = to_qxl_framebuffer(new_state->fb)->obj;
	user_bo = gem_to_qxl_bo(obj);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    user_bo->is_dumb && !user_bo->shadow) {
		if (plane->state->fb) {
			obj = to_qxl_framebuffer(plane->state->fb)->obj;
			old_bo = gem_to_qxl_bo(obj);
		}
		/* Reuse the old shadow only when size, crtc and all
		 * src/crtc/rotation/zpos parameters are unchanged,
		 * i.e. the update is a pure flip. */
		if (old_bo && old_bo->shadow &&
		    user_bo->gem_base.size == old_bo->gem_base.size &&
		    plane->state->crtc == new_state->crtc &&
		    plane->state->crtc_w == new_state->crtc_w &&
		    plane->state->crtc_h == new_state->crtc_h &&
		    plane->state->src_x == new_state->src_x &&
		    plane->state->src_y == new_state->src_y &&
		    plane->state->src_w == new_state->src_w &&
		    plane->state->src_h == new_state->src_h &&
		    plane->state->rotation == new_state->rotation &&
		    plane->state->zpos == new_state->zpos) {
			drm_gem_object_get(&old_bo->shadow->gem_base);
			user_bo->shadow = old_bo->shadow;
		} else {
			/* NOTE(review): return value ignored — on failure
			 * user_bo->shadow stays NULL; confirm the update
			 * path copes with a missing shadow. */
			qxl_bo_create(qdev, user_bo->gem_base.size,
				      true, true, QXL_GEM_DOMAIN_VRAM, NULL,
				      &user_bo->shadow);
		}
	}

	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
	if (ret)
		return ret;

	return 0;
}
  637. static void qxl_plane_cleanup_fb(struct drm_plane *plane,
  638. struct drm_plane_state *old_state)
  639. {
  640. struct drm_gem_object *obj;
  641. struct qxl_bo *user_bo;
  642. if (!old_state->fb) {
  643. /*
  644. * we never executed prepare_fb, so there's nothing to
  645. * unpin.
  646. */
  647. return;
  648. }
  649. obj = to_qxl_framebuffer(old_state->fb)->obj;
  650. user_bo = gem_to_qxl_bo(obj);
  651. qxl_bo_unpin(user_bo);
  652. if (user_bo->shadow && !user_bo->is_primary) {
  653. drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
  654. user_bo->shadow = NULL;
  655. }
  656. }
/* Pixel formats accepted by the cursor plane. */
static const uint32_t qxl_cursor_plane_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Atomic helper callbacks for the cursor plane. */
static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
	.atomic_check = qxl_plane_atomic_check,
	.atomic_update = qxl_cursor_atomic_update,
	.atomic_disable = qxl_cursor_atomic_disable,
	.prepare_fb = qxl_plane_prepare_fb,
	.cleanup_fb = qxl_plane_cleanup_fb,
};

/* Cursor plane vtable: the generic atomic helpers suffice. */
static const struct drm_plane_funcs qxl_cursor_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

/* Pixel formats accepted by the primary plane. */
static const uint32_t qxl_primary_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

/* Atomic helper callbacks for the primary plane. */
static const struct drm_plane_helper_funcs primary_helper_funcs = {
	.atomic_check = qxl_primary_atomic_check,
	.atomic_update = qxl_primary_atomic_update,
	.atomic_disable = qxl_primary_atomic_disable,
	.prepare_fb = qxl_plane_prepare_fb,
	.cleanup_fb = qxl_plane_cleanup_fb,
};

/* Primary plane vtable: the generic atomic helpers suffice. */
static const struct drm_plane_funcs qxl_primary_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
  694. static struct drm_plane *qxl_create_plane(struct qxl_device *qdev,
  695. unsigned int possible_crtcs,
  696. enum drm_plane_type type)
  697. {
  698. const struct drm_plane_helper_funcs *helper_funcs = NULL;
  699. struct drm_plane *plane;
  700. const struct drm_plane_funcs *funcs;
  701. const uint32_t *formats;
  702. int num_formats;
  703. int err;
  704. if (type == DRM_PLANE_TYPE_PRIMARY) {
  705. funcs = &qxl_primary_plane_funcs;
  706. formats = qxl_primary_plane_formats;
  707. num_formats = ARRAY_SIZE(qxl_primary_plane_formats);
  708. helper_funcs = &primary_helper_funcs;
  709. } else if (type == DRM_PLANE_TYPE_CURSOR) {
  710. funcs = &qxl_cursor_plane_funcs;
  711. formats = qxl_cursor_plane_formats;
  712. helper_funcs = &qxl_cursor_helper_funcs;
  713. num_formats = ARRAY_SIZE(qxl_cursor_plane_formats);
  714. } else {
  715. return ERR_PTR(-EINVAL);
  716. }
  717. plane = kzalloc(sizeof(*plane), GFP_KERNEL);
  718. if (!plane)
  719. return ERR_PTR(-ENOMEM);
  720. err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs,
  721. funcs, formats, num_formats,
  722. NULL, type, NULL);
  723. if (err)
  724. goto free_plane;
  725. drm_plane_helper_add(plane, helper_funcs);
  726. return plane;
  727. free_plane:
  728. kfree(plane);
  729. return ERR_PTR(-EINVAL);
  730. }
  731. static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
  732. {
  733. struct qxl_crtc *qxl_crtc;
  734. struct drm_plane *primary, *cursor;
  735. struct qxl_device *qdev = dev->dev_private;
  736. int r;
  737. qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
  738. if (!qxl_crtc)
  739. return -ENOMEM;
  740. primary = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_PRIMARY);
  741. if (IS_ERR(primary)) {
  742. r = -ENOMEM;
  743. goto free_mem;
  744. }
  745. cursor = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_CURSOR);
  746. if (IS_ERR(cursor)) {
  747. r = -ENOMEM;
  748. goto clean_primary;
  749. }
  750. r = drm_crtc_init_with_planes(dev, &qxl_crtc->base, primary, cursor,
  751. &qxl_crtc_funcs, NULL);
  752. if (r)
  753. goto clean_cursor;
  754. qxl_crtc->index = crtc_id;
  755. drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
  756. return 0;
  757. clean_cursor:
  758. drm_plane_cleanup(cursor);
  759. kfree(cursor);
  760. clean_primary:
  761. drm_plane_cleanup(primary);
  762. kfree(primary);
  763. free_mem:
  764. kfree(qxl_crtc);
  765. return r;
  766. }
/* Encoder DPMS hook: intentionally a no-op, only emits a debug trace.
 * The virtual encoder has no power state to manage here. */
static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
{
	DRM_DEBUG("\n");
}
/* Encoder prepare hook: intentionally a no-op, only emits a debug trace. */
static void qxl_enc_prepare(struct drm_encoder *encoder)
{
	DRM_DEBUG("\n");
}
  775. static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
  776. struct drm_encoder *encoder)
  777. {
  778. int i;
  779. struct qxl_output *output = drm_encoder_to_qxl_output(encoder);
  780. struct qxl_head *head;
  781. struct drm_display_mode *mode;
  782. BUG_ON(!encoder);
  783. /* TODO: ugly, do better */
  784. i = output->index;
  785. if (!qdev->monitors_config ||
  786. qdev->monitors_config->max_allowed <= i) {
  787. DRM_ERROR(
  788. "head number too large or missing monitors config: %p, %d",
  789. qdev->monitors_config,
  790. qdev->monitors_config ?
  791. qdev->monitors_config->max_allowed : -1);
  792. return;
  793. }
  794. if (!encoder->crtc) {
  795. DRM_ERROR("missing crtc on encoder %p\n", encoder);
  796. return;
  797. }
  798. if (i != 0)
  799. DRM_DEBUG("missing for multiple monitors: no head holes\n");
  800. head = &qdev->monitors_config->heads[i];
  801. head->id = i;
  802. if (encoder->crtc->enabled) {
  803. mode = &encoder->crtc->mode;
  804. head->width = mode->hdisplay;
  805. head->height = mode->vdisplay;
  806. head->x = encoder->crtc->x;
  807. head->y = encoder->crtc->y;
  808. if (qdev->monitors_config->count < i + 1)
  809. qdev->monitors_config->count = i + 1;
  810. } else {
  811. head->width = 0;
  812. head->height = 0;
  813. head->x = 0;
  814. head->y = 0;
  815. }
  816. DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n",
  817. i, head->x, head->y, head->width, head->height, qdev->monitors_config->count);
  818. head->flags = 0;
  819. /* TODO - somewhere else to call this for multiple monitors
  820. * (config_commit?) */
  821. qxl_send_monitors_config(qdev);
  822. }
  823. static void qxl_enc_commit(struct drm_encoder *encoder)
  824. {
  825. struct qxl_device *qdev = encoder->dev->dev_private;
  826. qxl_write_monitors_config_for_encoder(qdev, encoder);
  827. DRM_DEBUG("\n");
  828. }
/* Encoder mode-set hook: intentionally a no-op, only emits a debug trace.
 * The actual geometry is written in the commit hook. */
static void qxl_enc_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	DRM_DEBUG("\n");
}
  835. static int qxl_conn_get_modes(struct drm_connector *connector)
  836. {
  837. unsigned pwidth = 1024;
  838. unsigned pheight = 768;
  839. int ret = 0;
  840. ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
  841. if (ret < 0)
  842. return ret;
  843. ret += qxl_add_common_modes(connector, pwidth, pheight);
  844. return ret;
  845. }
  846. static int qxl_conn_mode_valid(struct drm_connector *connector,
  847. struct drm_display_mode *mode)
  848. {
  849. struct drm_device *ddev = connector->dev;
  850. struct qxl_device *qdev = ddev->dev_private;
  851. int i;
  852. /* TODO: is this called for user defined modes? (xrandr --add-mode)
  853. * TODO: check that the mode fits in the framebuffer */
  854. if(qdev->monitors_config_width == mode->hdisplay &&
  855. qdev->monitors_config_height == mode->vdisplay)
  856. return MODE_OK;
  857. for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
  858. if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
  859. return MODE_OK;
  860. }
  861. return MODE_BAD;
  862. }
  863. static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
  864. {
  865. struct qxl_output *qxl_output =
  866. drm_connector_to_qxl_output(connector);
  867. DRM_DEBUG("\n");
  868. return &qxl_output->enc;
  869. }
/* Encoder helper vtable: dpms/prepare/mode_set are stubs; commit writes
 * the monitors config for this encoder's head. */
static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
	.dpms = qxl_enc_dpms,
	.prepare = qxl_enc_prepare,
	.mode_set = qxl_enc_mode_set,
	.commit = qxl_enc_commit,
};
/* Connector helper vtable: mode enumeration/validation and the fixed
 * connector-to-encoder mapping. */
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
	.get_modes = qxl_conn_get_modes,
	.mode_valid = qxl_conn_mode_valid,
	.best_encoder = qxl_best_encoder,
};
  881. static enum drm_connector_status qxl_conn_detect(
  882. struct drm_connector *connector,
  883. bool force)
  884. {
  885. struct qxl_output *output =
  886. drm_connector_to_qxl_output(connector);
  887. struct drm_device *ddev = connector->dev;
  888. struct qxl_device *qdev = ddev->dev_private;
  889. bool connected = false;
  890. /* The first monitor is always connected */
  891. if (!qdev->client_monitors_config) {
  892. if (output->index == 0)
  893. connected = true;
  894. } else
  895. connected = qdev->client_monitors_config->count > output->index &&
  896. qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
  897. DRM_DEBUG("#%d connected: %d\n", output->index, connected);
  898. if (!connected)
  899. qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
  900. return connected ? connector_status_connected
  901. : connector_status_disconnected;
  902. }
/* Connector property setter: intentionally accepts and ignores all
 * property writes (always returns 0), only emitting a debug trace. */
static int qxl_conn_set_property(struct drm_connector *connector,
				 struct drm_property *property,
				 uint64_t value)
{
	DRM_DEBUG("\n");
	return 0;
}
  910. static void qxl_conn_destroy(struct drm_connector *connector)
  911. {
  912. struct qxl_output *qxl_output =
  913. drm_connector_to_qxl_output(connector);
  914. drm_connector_unregister(connector);
  915. drm_connector_cleanup(connector);
  916. kfree(qxl_output);
  917. }
/* Connector vtable: helper-based dpms/probing plus atomic state
 * boilerplate; detection is driven by the client monitors config. */
static const struct drm_connector_funcs qxl_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = qxl_conn_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = qxl_conn_set_property,
	.destroy = qxl_conn_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/* Encoder destroy hook: DRM-side cleanup only. The encoder's memory is
 * owned by the embedding qxl_output and freed in qxl_conn_destroy(). */
static void qxl_enc_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}
/* Encoder vtable: only destroy is needed for the virtual encoder. */
static const struct drm_encoder_funcs qxl_enc_funcs = {
	.destroy = qxl_enc_destroy,
};
  935. static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
  936. {
  937. if (qdev->hotplug_mode_update_property)
  938. return 0;
  939. qdev->hotplug_mode_update_property =
  940. drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
  941. "hotplug_mode_update", 0, 1);
  942. return 0;
  943. }
  944. static int qdev_output_init(struct drm_device *dev, int num_output)
  945. {
  946. struct qxl_device *qdev = dev->dev_private;
  947. struct qxl_output *qxl_output;
  948. struct drm_connector *connector;
  949. struct drm_encoder *encoder;
  950. qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
  951. if (!qxl_output)
  952. return -ENOMEM;
  953. qxl_output->index = num_output;
  954. connector = &qxl_output->base;
  955. encoder = &qxl_output->enc;
  956. drm_connector_init(dev, &qxl_output->base,
  957. &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
  958. drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
  959. DRM_MODE_ENCODER_VIRTUAL, NULL);
  960. /* we get HPD via client monitors config */
  961. connector->polled = DRM_CONNECTOR_POLL_HPD;
  962. encoder->possible_crtcs = 1 << num_output;
  963. drm_mode_connector_attach_encoder(&qxl_output->base,
  964. &qxl_output->enc);
  965. drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
  966. drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
  967. drm_object_attach_property(&connector->base,
  968. qdev->hotplug_mode_update_property, 0);
  969. drm_object_attach_property(&connector->base,
  970. dev->mode_config.suggested_x_property, 0);
  971. drm_object_attach_property(&connector->base,
  972. dev->mode_config.suggested_y_property, 0);
  973. return 0;
  974. }
  975. static struct drm_framebuffer *
  976. qxl_user_framebuffer_create(struct drm_device *dev,
  977. struct drm_file *file_priv,
  978. const struct drm_mode_fb_cmd2 *mode_cmd)
  979. {
  980. struct drm_gem_object *obj;
  981. struct qxl_framebuffer *qxl_fb;
  982. int ret;
  983. obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
  984. if (!obj)
  985. return NULL;
  986. qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
  987. if (qxl_fb == NULL)
  988. return NULL;
  989. ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
  990. if (ret) {
  991. kfree(qxl_fb);
  992. drm_gem_object_unreference_unlocked(obj);
  993. return NULL;
  994. }
  995. return &qxl_fb->base;
  996. }
/* Mode-config vtable: custom fb_create plus stock atomic check/commit. */
static const struct drm_mode_config_funcs qxl_mode_funcs = {
	.fb_create = qxl_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
  1002. int qxl_create_monitors_object(struct qxl_device *qdev)
  1003. {
  1004. int ret;
  1005. struct drm_gem_object *gobj;
  1006. int max_allowed = qxl_num_crtc;
  1007. int monitors_config_size = sizeof(struct qxl_monitors_config) +
  1008. max_allowed * sizeof(struct qxl_head);
  1009. ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
  1010. QXL_GEM_DOMAIN_VRAM,
  1011. false, false, NULL, &gobj);
  1012. if (ret) {
  1013. DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
  1014. return -ENOMEM;
  1015. }
  1016. qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
  1017. ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
  1018. if (ret)
  1019. return ret;
  1020. qxl_bo_kmap(qdev->monitors_config_bo, NULL);
  1021. qdev->monitors_config = qdev->monitors_config_bo->kptr;
  1022. qdev->ram_header->monitors_config =
  1023. qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
  1024. memset(qdev->monitors_config, 0, monitors_config_size);
  1025. qdev->monitors_config->max_allowed = max_allowed;
  1026. return 0;
  1027. }
/*
 * Tear down the monitors-config BO: clear the device-visible pointers
 * first so the hardware no longer references the buffer, then unmap,
 * unpin and drop the BO reference.
 *
 * Returns 0 on success or the qxl_bo_unpin() error. NOTE(review): on
 * unpin failure the BO reference is not dropped — confirm whether that
 * leak is intentional.
 */
int qxl_destroy_monitors_object(struct qxl_device *qdev)
{
	int ret;

	qdev->monitors_config = NULL;
	qdev->ram_header->monitors_config = 0;

	qxl_bo_kunmap(qdev->monitors_config_bo);
	ret = qxl_bo_unpin(qdev->monitors_config_bo);
	if (ret)
		return ret;

	qxl_bo_unref(&qdev->monitors_config_bo);
	return 0;
}
  1040. int qxl_modeset_init(struct qxl_device *qdev)
  1041. {
  1042. int i;
  1043. int ret;
  1044. drm_mode_config_init(&qdev->ddev);
  1045. ret = qxl_create_monitors_object(qdev);
  1046. if (ret)
  1047. return ret;
  1048. qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;
  1049. /* modes will be validated against the framebuffer size */
  1050. qdev->ddev.mode_config.min_width = 0;
  1051. qdev->ddev.mode_config.min_height = 0;
  1052. qdev->ddev.mode_config.max_width = 8192;
  1053. qdev->ddev.mode_config.max_height = 8192;
  1054. qdev->ddev.mode_config.fb_base = qdev->vram_base;
  1055. drm_mode_create_suggested_offset_properties(&qdev->ddev);
  1056. qxl_mode_create_hotplug_mode_update_property(qdev);
  1057. for (i = 0 ; i < qxl_num_crtc; ++i) {
  1058. qdev_crtc_init(&qdev->ddev, i);
  1059. qdev_output_init(&qdev->ddev, i);
  1060. }
  1061. qxl_display_read_client_monitors_config(qdev);
  1062. qdev->mode_info.mode_config_initialized = true;
  1063. drm_mode_config_reset(&qdev->ddev);
  1064. /* primary surface must be created by this point, to allow
  1065. * issuing command queue commands and having them read by
  1066. * spice server. */
  1067. qxl_fbdev_init(qdev);
  1068. return 0;
  1069. }
/*
 * Tear down KMS in reverse of qxl_modeset_init(): stop fbdev, release
 * the monitors-config object, then (only if init completed) clean up
 * the DRM mode config.
 */
void qxl_modeset_fini(struct qxl_device *qdev)
{
	qxl_fbdev_fini(qdev);

	qxl_destroy_monitors_object(qdev);
	if (qdev->mode_info.mode_config_initialized) {
		drm_mode_config_cleanup(&qdev->ddev);
		qdev->mode_info.mode_config_initialized = false;
	}
}