/* drivers/gpu/drm/gma500/gma_display.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright © 2006-2011 Intel Corporation
  4. *
  5. * Authors:
  6. * Eric Anholt <[email protected]>
  7. * Patrik Jakobsson <[email protected]>
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/highmem.h>
  11. #include <drm/drm_crtc.h>
  12. #include <drm/drm_fourcc.h>
  13. #include <drm/drm_framebuffer.h>
  14. #include <drm/drm_vblank.h>
  15. #include "framebuffer.h"
  16. #include "gem.h"
  17. #include "gma_display.h"
  18. #include "psb_irq.h"
  19. #include "psb_intel_drv.h"
  20. #include "psb_intel_reg.h"
  21. /*
  22. * Returns whether any output on the specified pipe is of the specified type
  23. */
  24. bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
  25. {
  26. struct drm_device *dev = crtc->dev;
  27. struct drm_connector_list_iter conn_iter;
  28. struct drm_connector *connector;
  29. drm_connector_list_iter_begin(dev, &conn_iter);
  30. drm_for_each_connector_iter(connector, &conn_iter) {
  31. if (connector->encoder && connector->encoder->crtc == crtc) {
  32. struct gma_encoder *gma_encoder =
  33. gma_attached_encoder(connector);
  34. if (gma_encoder->type == type) {
  35. drm_connector_list_iter_end(&conn_iter);
  36. return true;
  37. }
  38. }
  39. }
  40. drm_connector_list_iter_end(&conn_iter);
  41. return false;
  42. }
/*
 * Wait long enough for (at least) one vertical refresh to have occurred.
 *
 * NOTE(review): this is a busy-wait (mdelay), not a sleep, so it burns
 * ~20ms of CPU time; presumably kept as mdelay so callers in atomic
 * context are safe -- confirm before converting to msleep().
 */
void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}
/*
 * Point the primary display plane of @crtc at the currently bound
 * framebuffer, applying the (@x, @y) panning offset.
 *
 * Pins the new fb's GEM object into the GTT, programs the pipe's stride,
 * pixel-format and base/surface registers, then unpins @old_fb (if any).
 *
 * Returns 0 on success -- also when no fb is bound or the device could
 * not be powered up -- or a negative errno on pin/format failure.
 */
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_gem_object *pobj;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		/* Still unpin the previous fb on the way out. */
		goto gma_pipe_cleaner;
	}

	pobj = to_psb_gem_object(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = pobj->offset;
	/* Byte offset of the (x, y) pixel within the buffer. */
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	/* Translate the fb's bytes-per-pixel into the plane's format field. */
	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
	   the linear offset is named base for the other chips. map->surf
	   should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		/* Reads after writes post the values to the hardware. */
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}
/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* gamma_store holds three consecutive per-channel tables of
	 * gamma_size entries each: R, then G, then B. */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		/* Pack the high byte of each 16-bit channel value, plus the
		 * per-entry brightness tweak (lut_adj), into one dword. */
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		/* Device is powered down: stash the values in the saved
		 * register image so they take effect at the next restore. */
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}
	}
}
/*
 * .gamma_set hook.  The new table is read from crtc->gamma_store (which
 * is what gma_crtc_load_lut() consumes) -- presumably the DRM core has
 * already copied red/green/blue into it by the time this runs, so the
 * explicit arguments are intentionally unused here.
 */
static int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, u32 size,
			      struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}
/*
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	/* Cedarview: self-refresh must be off while reconfiguring. */
	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			/* Write once without the VCO bit, then set it
			 * (twice), letting the clocks settle each step. */
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		/* Reset pipe status, keeping only the FIFO-underrun bit.
		 * NOTE(review): presumably write-one-to-clear semantics --
		 * confirm against the hardware documentation. */
		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}
  274. static int gma_crtc_cursor_set(struct drm_crtc *crtc,
  275. struct drm_file *file_priv, uint32_t handle,
  276. uint32_t width, uint32_t height)
  277. {
  278. struct drm_device *dev = crtc->dev;
  279. struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
  280. struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
  281. int pipe = gma_crtc->pipe;
  282. uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
  283. uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
  284. uint32_t temp;
  285. size_t addr = 0;
  286. struct psb_gem_object *pobj;
  287. struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
  288. struct drm_gem_object *obj;
  289. void *tmp_dst;
  290. int ret = 0, i, cursor_pages;
  291. /* If we didn't get a handle then turn the cursor off */
  292. if (!handle) {
  293. temp = CURSOR_MODE_DISABLE;
  294. if (gma_power_begin(dev, false)) {
  295. REG_WRITE(control, temp);
  296. REG_WRITE(base, 0);
  297. gma_power_end(dev);
  298. }
  299. /* Unpin the old GEM object */
  300. if (gma_crtc->cursor_obj) {
  301. pobj = to_psb_gem_object(gma_crtc->cursor_obj);
  302. psb_gem_unpin(pobj);
  303. drm_gem_object_put(gma_crtc->cursor_obj);
  304. gma_crtc->cursor_obj = NULL;
  305. }
  306. return 0;
  307. }
  308. /* Currently we only support 64x64 cursors */
  309. if (width != 64 || height != 64) {
  310. dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
  311. return -EINVAL;
  312. }
  313. obj = drm_gem_object_lookup(file_priv, handle);
  314. if (!obj) {
  315. ret = -ENOENT;
  316. goto unlock;
  317. }
  318. if (obj->size < width * height * 4) {
  319. dev_dbg(dev->dev, "Buffer is too small\n");
  320. ret = -ENOMEM;
  321. goto unref_cursor;
  322. }
  323. pobj = to_psb_gem_object(obj);
  324. /* Pin the memory into the GTT */
  325. ret = psb_gem_pin(pobj);
  326. if (ret) {
  327. dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
  328. goto unref_cursor;
  329. }
  330. if (dev_priv->ops->cursor_needs_phys) {
  331. if (!cursor_pobj) {
  332. dev_err(dev->dev, "No hardware cursor mem available");
  333. ret = -ENOMEM;
  334. goto unref_cursor;
  335. }
  336. cursor_pages = obj->size / PAGE_SIZE;
  337. if (cursor_pages > 4)
  338. cursor_pages = 4; /* Prevent overflow */
  339. /* Copy the cursor to cursor mem */
  340. tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
  341. for (i = 0; i < cursor_pages; i++) {
  342. memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
  343. tmp_dst += PAGE_SIZE;
  344. }
  345. addr = gma_crtc->cursor_addr;
  346. } else {
  347. addr = pobj->offset;
  348. gma_crtc->cursor_addr = addr;
  349. }
  350. temp = 0;
  351. /* set the pipe for the cursor */
  352. temp |= (pipe << 28);
  353. temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
  354. if (gma_power_begin(dev, false)) {
  355. REG_WRITE(control, temp);
  356. REG_WRITE(base, addr);
  357. gma_power_end(dev);
  358. }
  359. /* unpin the old bo */
  360. if (gma_crtc->cursor_obj) {
  361. pobj = to_psb_gem_object(gma_crtc->cursor_obj);
  362. psb_gem_unpin(pobj);
  363. drm_gem_object_put(gma_crtc->cursor_obj);
  364. }
  365. gma_crtc->cursor_obj = obj;
  366. unlock:
  367. return ret;
  368. unref_cursor:
  369. drm_gem_object_put(obj);
  370. return ret;
  371. }
  372. static int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
  373. {
  374. struct drm_device *dev = crtc->dev;
  375. struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
  376. int pipe = gma_crtc->pipe;
  377. uint32_t temp = 0;
  378. uint32_t addr;
  379. if (x < 0) {
  380. temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
  381. x = -x;
  382. }
  383. if (y < 0) {
  384. temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
  385. y = -y;
  386. }
  387. temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
  388. temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
  389. addr = gma_crtc->cursor_addr;
  390. if (gma_power_begin(dev, false)) {
  391. REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
  392. REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
  393. gma_power_end(dev);
  394. }
  395. return 0;
  396. }
  397. void gma_crtc_prepare(struct drm_crtc *crtc)
  398. {
  399. const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
  400. crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
  401. }
  402. void gma_crtc_commit(struct drm_crtc *crtc)
  403. {
  404. const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
  405. crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
  406. }
  407. void gma_crtc_disable(struct drm_crtc *crtc)
  408. {
  409. struct psb_gem_object *pobj;
  410. const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
  411. crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
  412. if (crtc->primary->fb) {
  413. pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
  414. psb_gem_unpin(pobj);
  415. }
  416. }
/*
 * Final CRTC teardown: drop the hardware-cursor backing object (if any),
 * free the saved register state, unregister the CRTC and free it.
 */
void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	if (gma_crtc->cursor_pobj)
		drm_gem_object_put(&gma_crtc->cursor_pobj->base);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}
/*
 * Legacy page flip: repoint the primary plane at @fb via the helper's
 * mode_set_base, optionally queueing @event for delivery at the next
 * vblank interrupt.  The previous fb pointer is restored on failure.
 */
int gma_crtc_page_flip(struct drm_crtc *crtc,
		       struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *current_fb = crtc->primary->fb;
	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	if (!crtc_funcs->mode_set_base)
		return -EINVAL;

	/* Using mode_set_base requires the new fb to be set already. */
	crtc->primary->fb = fb;

	if (event) {
		/* Arm the event and take a vblank reference under the
		 * event lock so the IRQ handler sees a consistent state. */
		spin_lock_irqsave(&dev->event_lock, flags);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		gma_crtc->page_flip_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);

		/* Call this locked if we want an event at vblank interrupt. */
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
		if (ret) {
			/* Disarm -- re-check under the lock because the IRQ
			 * handler may already have consumed the event. */
			spin_lock_irqsave(&dev->event_lock, flags);
			if (gma_crtc->page_flip_event) {
				gma_crtc->page_flip_event = NULL;
				drm_crtc_vblank_put(crtc);
			}
			spin_unlock_irqrestore(&dev->event_lock, flags);
		}
	} else {
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
	}

	/* Restore previous fb in case of failure. */
	if (ret)
		crtc->primary->fb = current_fb;

	return ret;
}
/* Legacy (non-atomic) CRTC entry points shared by the GMA500 family. */
const struct drm_crtc_funcs gma_crtc_funcs = {
	.cursor_set = gma_crtc_cursor_set,
	.cursor_move = gma_crtc_cursor_move,
	.gamma_set = gma_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = gma_crtc_destroy,
	.page_flip = gma_crtc_page_flip,
	.enable_vblank = gma_crtc_enable_vblank,
	.disable_vblank = gma_crtc_disable_vblank,
	.get_vblank_counter = gma_crtc_get_vblank_counter,
};
/*
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	/* Snapshot plane, pipe, PLL and timing registers. */
	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	/* Save all 256 palette/gamma entries (one dword each). */
	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}
/*
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	/* If the VCO was running, park it first so the dividers written
	 * below latch before it is re-enabled -- NOTE(review): mirrors the
	 * enable sequence in gma_crtc_dpms(). */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	/* Wait for the clocks to stabilize. */
	udelay(150);

	/* Timings, plane geometry and pipe configuration. */
	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	/* Re-enable the plane only after the pipe is running again. */
	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	/* Restore all 256 palette/gamma entries (one dword each). */
	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}
  563. void gma_encoder_prepare(struct drm_encoder *encoder)
  564. {
  565. const struct drm_encoder_helper_funcs *encoder_funcs =
  566. encoder->helper_private;
  567. /* lvds has its own version of prepare see psb_intel_lvds_prepare */
  568. encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
  569. }
  570. void gma_encoder_commit(struct drm_encoder *encoder)
  571. {
  572. const struct drm_encoder_helper_funcs *encoder_funcs =
  573. encoder->helper_private;
  574. /* lvds has its own version of commit see psb_intel_lvds_commit */
  575. encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
  576. }
  577. void gma_encoder_destroy(struct drm_encoder *encoder)
  578. {
  579. struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
  580. drm_encoder_cleanup(encoder);
  581. kfree(intel_encoder);
  582. }
  583. /* Currently there is only a 1:1 mapping of encoders and connectors */
  584. struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
  585. {
  586. struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
  587. return &gma_encoder->base;
  588. }
  589. void gma_connector_attach_encoder(struct gma_connector *connector,
  590. struct gma_encoder *encoder)
  591. {
  592. connector->encoder = encoder;
  593. drm_connector_attach_encoder(&connector->base,
  594. &encoder->base);
  595. }
  596. #define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
  597. bool gma_pll_is_valid(struct drm_crtc *crtc,
  598. const struct gma_limit_t *limit,
  599. struct gma_clock_t *clock)
  600. {
  601. if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
  602. GMA_PLL_INVALID("p1 out of range");
  603. if (clock->p < limit->p.min || limit->p.max < clock->p)
  604. GMA_PLL_INVALID("p out of range");
  605. if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
  606. GMA_PLL_INVALID("m2 out of range");
  607. if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
  608. GMA_PLL_INVALID("m1 out of range");
  609. /* On CDV m1 is always 0 */
  610. if (clock->m1 <= clock->m2 && clock->m1 != 0)
  611. GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
  612. if (clock->m < limit->m.min || limit->m.max < clock->m)
  613. GMA_PLL_INVALID("m out of range");
  614. if (clock->n < limit->n.min || limit->n.max < clock->n)
  615. GMA_PLL_INVALID("n out of range");
  616. if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
  617. GMA_PLL_INVALID("vco out of range");
  618. /* XXX: We may need to be checking "Dot clock"
  619. * depending on the multiplier, connector, etc.,
  620. * rather than just a single range.
  621. */
  622. if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
  623. GMA_PLL_INVALID("dot out of range");
  624. return true;
  625. }
/*
 * Exhaustively search the divider space for the PLL setting whose dot
 * clock comes closest to @target (given @refclk), using the CRTC's
 * chip-specific clock-computation and validity callbacks.  The best
 * candidate found is written to @best_clock.
 *
 * Returns true when at least one valid configuration improved on the
 * initial error bound (which starts at @target itself).
 */
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
		to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	/* Pick the p2 post divider up front: LVDS follows the panel's
	 * current single/dual-channel state, others go by dot_limit. */
	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		/* m2 must stay below m1, except when m1 is pinned at 0. */
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					/* Derive dot/vco for this divider
					 * combination, then validate it. */
					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}