gma_display.c

/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <drm/drmP.h>

#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
				gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}
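
/*
 * Illustrative note (not from the original source): callers use this helper
 * to branch on the attached output type; gma_find_best_pll() at the bottom
 * of this file, for example, checks for INTEL_OUTPUT_LVDS to decide whether
 * to take the p2 divider from the panel's current clock state.
 */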

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}

int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = psbfb->gtt->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}
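
/*
 * Illustrative note (not from the original source): for a hypothetical
 * 1024x768 XRGB8888 framebuffer, pitches[0] would be 1024 * 4 = 4096 and
 * cpp[0] would be 4, so panning to (x, y) = (10, 20) above yields
 * offset = 20 * 4096 + 10 * 4 = 81960 bytes from the start of the pinned
 * GTT range.
 */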

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  ((gma_crtc->lut_r[i] +
				    gma_crtc->lut_adj[i]) << 16) |
				  ((gma_crtc->lut_g[i] +
				    gma_crtc->lut_adj[i]) << 8) |
				  (gma_crtc->lut_b[i] +
				   gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				((gma_crtc->lut_r[i] +
				  gma_crtc->lut_adj[i]) << 16) |
				((gma_crtc->lut_g[i] +
				  gma_crtc->lut_adj[i]) << 8) |
				(gma_crtc->lut_b[i] +
				 gma_crtc->lut_adj[i]);
		}
	}
}
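
/*
 * Illustrative note (not from the original source): each palette entry is
 * packed as 0x00RRGGBB, so for example lut_r = 0x80, lut_g = 0x40,
 * lut_b = 0x20 with lut_adj = 0 is written as
 * (0x80 << 16) | (0x40 << 8) | 0x20 = 0x00804020.
 */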

int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int i;

	for (i = 0; i < size; i++) {
		gma_crtc->lut_r[i] = red[i] >> 8;
		gma_crtc->lut_g[i] = green[i] >> 8;
		gma_crtc->lut_b[i] = blue[i] >> 8;
	}

	gma_crtc_load_lut(crtc);

	return 0;
}
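
/*
 * Illustrative note (not from the original source): DRM passes 16-bit gamma
 * values while this hardware takes 8 bits per channel, so the >> 8 above
 * keeps only the high byte; e.g. red[i] = 0xFFFF becomes lut_r[i] = 0xFF and
 * red[i] = 0x8000 becomes 0x80.
 */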

/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}
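
/*
 * Illustrative note (not from the original source): the ordering above
 * follows the usual bring-up/tear-down pattern on this hardware: power up
 * as DPLL -> plane -> pipe and power down in the reverse order
 * (plane -> pipe -> DPLL), with register read-backs and delays used to let
 * each step settle before the next one.
 */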

int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;

		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Prevent overflow */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
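
/*
 * Illustrative note (not from the original source): the size check above
 * expects a 64x64 ARGB cursor, i.e. at least 64 * 64 * 4 = 16384 bytes
 * (four 4 KiB pages), which is also why the physical-cursor copy path caps
 * cursor_pages at 4.
 */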

int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}

	return 0;
}
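
/*
 * Illustrative note (not from the original source): the position register
 * uses sign-magnitude encoding per axis, so a partially off-screen cursor
 * at (-5, 12) is programmed as magnitude 5 with the X sign bit set plus
 * magnitude 12 in the Y field, rather than as a two's-complement value.
 */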

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		gt = to_psb_fb(crtc->primary->fb)->gtt;
		psb_gtt_unpin(gt);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

int gma_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}
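
/*
 * Illustrative note (not from the original source): wrapping the helper in
 * pm_runtime_forbid()/pm_runtime_allow() keeps the device from runtime
 * suspending while the mode set is in progress; runtime PM is re-enabled
 * afterwards regardless of the helper's return value.
 */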

/**
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
		to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outermost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}
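
/*
 * Illustrative note (not from the original source): the nested loops above
 * are a brute-force search over the divider space. For each candidate
 * (m1, m2, n, p1) the chip-specific clock_funcs->clock() callback derives
 * the resulting VCO and dot clock from refclk, the pll_is_valid callback
 * (such as gma_pll_is_valid() above) filters out combinations outside the
 * per-platform limits, and the candidate whose dot clock is closest to the
 * requested target is kept. The function returns true only if at least one
 * valid candidate improved on the initial error of `target`, i.e. if a
 * usable PLL setting was found.
 */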