gma_display.c

/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <drm/drmP.h>

#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}

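/* Set the framebuffer scanout address for a pipe: pin the framebuffer into
 * the GTT, then program the plane stride, pixel format and base/offset
 * registers.
 */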
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = psbfb->gtt->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}
	}
}

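/* The DRM core copies the new gamma ramp into crtc->gamma_store before it
 * calls this hook, so reloading the hardware LUT from there is all that is
 * needed here.
 */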
int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size,
		       struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}

/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

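/* Set the hardware cursor from a GEM object. Only 64x64 cursors are
 * supported; on chips that need a physical cursor address the image is
 * first copied into the driver's dedicated cursor memory.
 */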
int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Prevent overflow */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

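/* Move the hardware cursor. The position registers take unsigned magnitudes
 * with separate sign bits, so negative coordinates are converted here.
 */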
int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

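/* drm_crtc_helper callbacks: force the CRTC off around a mode set and turn
 * it back on once the new mode has been programmed.
 */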
void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		gt = to_psb_fb(crtc->primary->fb)->gtt;
		psb_gtt_unpin(gt);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

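/* Wrap drm_crtc_helper_set_config() so that runtime PM cannot suspend the
 * device while the mode set is in progress.
 */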
int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}

/**
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

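/* gma_pll_is_valid() checks a candidate set of PLL dividers against the
 * per-chip limits; gma_find_best_pll() uses it to filter its search.
 */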
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

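/* Brute-force search of the m1/m2/n/p1 divider space for the combination
 * whose resulting dot clock is closest to the requested target. Returns
 * true if a suitable combination was found.
 */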
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
						to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}