gud_pipe.c

// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * Some userspace rendering loops run all displays in the same loop.
 * This means that a fast display will have to wait for a slow one.
 * For this reason gud does flushing asynchronously by default.
 * The downside is that in e.g. a single display setup userspace thinks
 * the display is insanely fast since the driver reports back immediately
 * that the flush/pageflip is done. This wastes CPU and power.
 * Such users might want to set this module parameter to false.
 */
static bool gud_async_flush = true;
module_param_named(async_flush, gud_async_flush, bool, 0644);
MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
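
/*
 * Since the parameter is mode 0644 it can also be changed at runtime via
 * sysfs, e.g. (path assumes the module is loaded as "gud"):
 *
 *   echo 0 > /sys/module/gud/parameters/async_flush
 */
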
/*
 * FIXME: The driver is probably broken on Big Endian machines.
 * See discussion:
 * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
 */
static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
	return true;
#else
	return false;
#endif
}
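
/*
 * Convert XRGB8888 to 1/2/4 bits-per-pixel grayscale (R1/R2/R4) by first
 * converting to an 8-bit grayscale buffer and then packing pixels MSB-first
 * into bytes. Returns the length of the destination buffer in bytes, or
 * zero if the temporary buffer allocation fails.
 */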
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
				   void *src, struct drm_framebuffer *fb,
				   struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	unsigned int x, y, width, height;
	u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
	struct iosys_map dst_map, vmap;
	size_t len;
	void *buf;

	WARN_ON_ONCE(format->char_per_block[0] != 1);

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	height = drm_rect_height(rect);
	len = drm_format_info_min_pitch(format, 0, width) * height;

	buf = kmalloc(width * height, GFP_KERNEL);
	if (!buf)
		return 0;

	iosys_map_set_vaddr(&dst_map, buf);
	iosys_map_set_vaddr(&vmap, src);
	drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
	pix8 = buf;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix = (*pix8++) >> (8 - bits_per_pixel);
			*block |= pix << pixshift;
		}
	}

	kfree(buf);

	return len;
}
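
/*
 * Convert XRGB8888 to an indexed color format by reducing each color channel
 * to its most significant bit(s) and packing pixels MSB-first into bytes.
 * Currently only GUD_DRM_FORMAT_XRGB1111 is handled. Returns the length of
 * the destination buffer in bytes.
 */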
static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
				    void *src, struct drm_framebuffer *fb,
				    struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
	unsigned int x, y, width;
	__le32 *sbuf32;
	u32 pix32;
	size_t len;

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

	for (y = rect->y1; y < rect->y2; y++) {
		sbuf32 = src + (y * fb->pitches[0]);
		sbuf32 += rect->x1;

		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix32 = le32_to_cpu(*sbuf32++);
			r = pix32 >> 16;
			g = pix32 >> 8;
			b = pix32;

			switch (format->format) {
			case GUD_DRM_FORMAT_XRGB1111:
				pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
				break;
			default:
				WARN_ON_ONCE(1);
				return len;
			}

			*block |= pix << pixshift;
		}
	}

	return len;
}
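
/*
 * Prepare a flush of @rect: convert the framebuffer to the device format if
 * needed, optionally LZ4 compress it, and fill in the set_buffer request.
 * On success the transfer data is in gdrm->bulk_buf ready for the USB bulk
 * transfer. Falls back to an uncompressed retry if compression fails.
 */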
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct gud_set_buffer_req *req)
{
	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
	u8 compression = gdrm->compression;
	struct iosys_map map[DRM_FORMAT_MAX_PLANES] = { };
	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES] = { };
	struct iosys_map dst;
	void *vaddr, *buf;
	size_t pitch, len;
	int ret = 0;

	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
	len = pitch * drm_rect_height(rect);
	if (len > gdrm->bulk_len)
		return -E2BIG;

	ret = drm_gem_fb_vmap(fb, map, map_data);
	if (ret)
		return ret;

	vaddr = map_data[0].vaddr;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		goto vunmap;
retry:
	if (compression)
		buf = gdrm->compress_buf;
	else
		buf = gdrm->bulk_buf;

	iosys_map_set_vaddr(&dst, buf);

	/*
	 * Imported buffers are assumed to be write-combined and thus uncached
	 * with slow reads (at least on ARM).
	 */
	if (format != fb->format) {
		if (format->format == GUD_DRM_FORMAT_R1) {
			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
			if (!len) {
				ret = -ENOMEM;
				goto end_cpu_access;
			}
		} else if (format->format == DRM_FORMAT_R8) {
			drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
		} else if (format->format == DRM_FORMAT_RGB332) {
			drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
		} else if (format->format == DRM_FORMAT_RGB565) {
			drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
						  gud_is_big_endian());
		} else if (format->format == DRM_FORMAT_RGB888) {
			drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
		} else {
			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
		}
	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
		drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
	} else if (compression && !import_attach && pitch == fb->pitches[0]) {
		/* can compress directly from the framebuffer */
		buf = vaddr + rect->y1 * pitch;
	} else {
		drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
	}

	memset(req, 0, sizeof(*req));
	req->x = cpu_to_le32(rect->x1);
	req->y = cpu_to_le32(rect->y1);
	req->width = cpu_to_le32(drm_rect_width(rect));
	req->height = cpu_to_le32(drm_rect_height(rect));
	req->length = cpu_to_le32(len);

	if (compression & GUD_COMPRESSION_LZ4) {
		int complen;

		complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
		if (complen <= 0) {
			compression = 0;
			goto retry;
		}

		req->compression = GUD_COMPRESSION_LZ4;
		req->compressed_length = cpu_to_le32(complen);
	}

end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
vunmap:
	drm_gem_fb_vunmap(fb, map);

	return ret;
}
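
/*
 * The USB bulk transfer is done as a scatter-gather request that is
 * cancelled from an on-stack timer if the device doesn't complete it
 * within 3 seconds.
 */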
struct gud_usb_bulk_context {
	struct timer_list timer;
	struct usb_sg_request sgr;
};

static void gud_usb_bulk_timeout(struct timer_list *t)
{
	struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);

	usb_sg_cancel(&ctx->sgr);
}

static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
{
	struct gud_usb_bulk_context ctx;
	int ret;

	ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
			  gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
	if (ret)
		return ret;

	timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

	usb_sg_wait(&ctx.sgr);

	if (!del_timer_sync(&ctx.timer))
		ret = -ETIMEDOUT;
	else if (ctx.sgr.status < 0)
		ret = ctx.sgr.status;
	else if (ctx.sgr.bytes != len)
		ret = -EIO;

	destroy_timer_on_stack(&ctx.timer);

	return ret;
}
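
/*
 * Flush a single rectangle to the device: prepare the buffer, send the
 * set_buffer request when needed and then the pixel data as a bulk transfer.
 * Also keeps running statistics of nominal vs. actually transferred bytes.
 */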
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect)
{
	struct gud_set_buffer_req req;
	size_t len, trlen;
	int ret;

	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

	ret = gud_prep_flush(gdrm, fb, format, rect, &req);
	if (ret)
		return ret;

	len = le32_to_cpu(req.length);

	if (req.compression)
		trlen = le32_to_cpu(req.compressed_length);
	else
		trlen = len;

	gdrm->stats_length += len;
	/* Did it wrap around? */
	if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
		gdrm->stats_length = len;
		gdrm->stats_actual_length = 0;
	}
	gdrm->stats_actual_length += trlen;

	if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
		ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
		if (ret)
			return ret;
	}

	ret = gud_usb_bulk(gdrm, trlen);
	if (ret)
		gdrm->stats_num_errors++;

	return ret;
}
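
/*
 * Reset the damage rectangle to an "empty" state where x1/y1 are greater
 * than x2/y2, so the min/max logic in gud_add_damage() works for the first
 * rectangle that is added.
 */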
void gud_clear_damage(struct gud_device *gdrm)
{
	gdrm->damage.x1 = INT_MAX;
	gdrm->damage.y1 = INT_MAX;
	gdrm->damage.x2 = 0;
	gdrm->damage.y2 = 0;
}

static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
{
	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
}
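
/*
 * Re-queue the damage of a failed flush so it can be retried from the
 * worker. Only a single retry is attempted per failure.
 */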
static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
				   struct drm_rect *damage)
{
	/*
	 * pipe_update waits for the worker when the display mode is going to change.
	 * This ensures that the width and height are still the same, making it safe
	 * to add back the damage.
	 */

	mutex_lock(&gdrm->damage_lock);
	if (!gdrm->fb) {
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}
	gud_add_damage(gdrm, damage);
	mutex_unlock(&gdrm->damage_lock);

	/* Retry only once to avoid a possible storm in case of continuous errors. */
	if (!gdrm->prev_flush_failed)
		queue_work(system_long_wq, &gdrm->work);
	gdrm->prev_flush_failed = true;
}
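
/*
 * Worker that takes the accumulated damage and flushes it to the device,
 * splitting the update into multiple rectangles if it doesn't fit in the
 * bulk buffer. Fatal USB errors (device gone etc.) are not retried.
 */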
void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	const struct drm_format_info *format;
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	unsigned int i, lines;
	int idx, ret = 0;
	size_t pitch;

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	mutex_lock(&gdrm->damage_lock);
	fb = gdrm->fb;
	gdrm->fb = NULL;
	damage = gdrm->damage;
	gud_clear_damage(gdrm);
	mutex_unlock(&gdrm->damage_lock);

	if (!fb)
		goto out;

	format = fb->format;
	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	/* Split update if it's too big */
	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
	lines = drm_rect_height(&damage);

	if (gdrm->bulk_len < lines * pitch)
		lines = gdrm->bulk_len / pitch;

	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
		struct drm_rect rect = damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);

		ret = gud_flush_rect(gdrm, fb, format, &rect);
		if (ret) {
			if (ret != -ENODEV && ret != -ECONNRESET &&
			    ret != -ESHUTDOWN && ret != -EPROTO) {
				bool prev_flush_failed = gdrm->prev_flush_failed;

				gud_retry_failed_flush(gdrm, fb, &damage);

				if (!prev_flush_failed)
					dev_err_ratelimited(fb->dev->dev,
							    "Failed to flush framebuffer: error=%d\n", ret);
			}
			break;
		}

		gdrm->prev_flush_failed = false;
	}

	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}
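
/*
 * Add damage and kick off the flush worker. A reference is held on the
 * framebuffer while it's queued; a reference to a previously queued
 * framebuffer is dropped.
 */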
static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
				struct drm_rect *damage)
{
	struct drm_framebuffer *old_fb = NULL;

	mutex_lock(&gdrm->damage_lock);

	if (fb != gdrm->fb) {
		old_fb = gdrm->fb;
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}

	gud_add_damage(gdrm, damage);

	mutex_unlock(&gdrm->damage_lock);

	queue_work(system_long_wq, &gdrm->work);

	if (old_fb)
		drm_framebuffer_put(old_fb);
}
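
/*
 * Atomic check: build a state request from the new mode, format and
 * properties and ask the device to verify it with a SET_STATE_CHECK
 * request before the commit goes ahead.
 */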
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state)
{
	struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
	struct drm_plane_state *old_plane_state = pipe->plane.state;
	const struct drm_display_mode *mode = &new_crtc_state->mode;
	struct drm_atomic_state *state = new_plane_state->state;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	struct drm_connector_state *connector_state = NULL;
	struct drm_framebuffer *fb = new_plane_state->fb;
	const struct drm_format_info *format;
	struct drm_connector *connector;
	unsigned int i, num_properties;
	struct gud_state_req *req;
	int idx, ret;
	size_t len;

	if (WARN_ON_ONCE(!fb))
		return -EINVAL;

	format = fb->format;

	if (old_plane_state->rotation != new_plane_state->rotation)
		new_crtc_state->mode_changed = true;

	if (old_fb && old_fb->format != format)
		new_crtc_state->mode_changed = true;

	if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
		return 0;

	/* Only one connector is supported */
	if (hweight32(new_crtc_state->connector_mask) != 1)
		return -EINVAL;

	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc)
			break;
	}

	/*
	 * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
	 * the connector included in the state.
	 */
	if (!connector_state) {
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->crtc) {
				connector_state = connector->state;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	if (WARN_ON_ONCE(!connector_state))
		return -ENOENT;

	len = struct_size(req, properties,
			  GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM);
	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	gud_from_display_mode(&req->mode, mode);

	req->format = gud_from_fourcc(format->format);
	if (WARN_ON_ONCE(!req->format)) {
		ret = -EINVAL;
		goto out;
	}

	req->connector = drm_connector_index(connector_state->connector);

	ret = gud_connector_fill_properties(connector_state, req->properties);
	if (ret < 0)
		goto out;

	num_properties = ret;
	for (i = 0; i < gdrm->num_properties; i++) {
		u16 prop = gdrm->properties[i];
		u64 val;

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/* DRM UAPI matches the protocol so use value directly */
			val = new_plane_state->rotation;
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		req->properties[num_properties + i].prop = cpu_to_le16(prop);
		req->properties[num_properties + i].val = cpu_to_le64(val);
		num_properties++;
	}

	if (drm_dev_enter(fb->dev, &idx)) {
		len = struct_size(req, properties, num_properties);
		ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
		drm_dev_exit(idx);
	} else {
		ret = -ENODEV;
	}
out:
	kfree(req);

	return ret;
}
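
/*
 * Atomic update: send controller/display enable requests as needed, merge
 * the plane damage and queue it for flushing. With async_flush disabled
 * the worker is flushed before returning.
 */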
void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct gud_device *gdrm = to_gud_device(drm);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_rect damage;
	int idx;

	if (crtc->state->mode_changed || !crtc->state->enable) {
		cancel_work_sync(&gdrm->work);
		mutex_lock(&gdrm->damage_lock);
		if (gdrm->fb) {
			drm_framebuffer_put(gdrm->fb);
			gdrm->fb = NULL;
		}
		gud_clear_damage(gdrm);
		mutex_unlock(&gdrm->damage_lock);
	}

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!old_state->fb)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

	if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
		gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

	if (crtc->state->active_changed)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

	if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
		if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
			drm_rect_init(&damage, 0, 0, fb->width, fb->height);
		gud_fb_queue_damage(gdrm, fb, &damage);
		if (!gud_async_flush)
			flush_work(&gdrm->work);
	}

	if (!crtc->state->enable)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

	drm_dev_exit(idx);
}