/* drivers/gpu/drm/gud/gud_drv.c */
  1. // SPDX-License-Identifier: MIT
  2. /*
  3. * Copyright 2020 Noralf Trønnes
  4. */
  5. #include <linux/dma-buf.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/lz4.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/string_helpers.h>
  11. #include <linux/usb.h>
  12. #include <linux/vmalloc.h>
  13. #include <linux/workqueue.h>
  14. #include <drm/drm_atomic_helper.h>
  15. #include <drm/drm_blend.h>
  16. #include <drm/drm_damage_helper.h>
  17. #include <drm/drm_debugfs.h>
  18. #include <drm/drm_drv.h>
  19. #include <drm/drm_fb_helper.h>
  20. #include <drm/drm_fourcc.h>
  21. #include <drm/drm_gem_atomic_helper.h>
  22. #include <drm/drm_gem_framebuffer_helper.h>
  23. #include <drm/drm_gem_shmem_helper.h>
  24. #include <drm/drm_managed.h>
  25. #include <drm/drm_print.h>
  26. #include <drm/drm_probe_helper.h>
  27. #include <drm/drm_simple_kms_helper.h>
  28. #include <drm/gud.h>
  29. #include "gud_internal.h"
/*
 * Only used internally: 1-bit monochrome format for the device side of the
 * XRGB8888 emulation, never exposed to userspace (filtered out in gud_probe()).
 */
static const struct drm_format_info gud_drm_format_r1 = {
	.format = GUD_DRM_FORMAT_R1,
	.num_planes = 1,
	/* One byte holds an 8x1 pixel block: 1 bit per pixel */
	.char_per_block = { 1, 0, 0 },
	.block_w = { 8, 0, 0 },
	.block_h = { 1, 0, 0 },
	.hsub = 1,
	.vsub = 1,
};
/*
 * Only used internally: 1-bit-per-channel RGB format for XRGB8888 emulation,
 * never exposed to userspace (filtered out in gud_probe()).
 */
static const struct drm_format_info gud_drm_format_xrgb1111 = {
	.format = GUD_DRM_FORMAT_XRGB1111,
	.num_planes = 1,
	/* One byte holds a 2x1 pixel block: 4 bits per pixel */
	.char_per_block = { 1, 0, 0 },
	.block_w = { 2, 0, 0 },
	.block_h = { 1, 0, 0 },
	.hsub = 1,
	.vsub = 1,
};
  49. static int gud_usb_control_msg(struct usb_interface *intf, bool in,
  50. u8 request, u16 value, void *buf, size_t len)
  51. {
  52. u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
  53. u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
  54. struct usb_device *usb = interface_to_usbdev(intf);
  55. unsigned int pipe;
  56. if (len && !buf)
  57. return -EINVAL;
  58. if (in) {
  59. pipe = usb_rcvctrlpipe(usb, 0);
  60. requesttype |= USB_DIR_IN;
  61. } else {
  62. pipe = usb_sndctrlpipe(usb, 0);
  63. requesttype |= USB_DIR_OUT;
  64. }
  65. return usb_control_msg(usb, pipe, request, requesttype, value,
  66. ifnum, buf, len, USB_CTRL_GET_TIMEOUT);
  67. }
  68. static int gud_get_display_descriptor(struct usb_interface *intf,
  69. struct gud_display_descriptor_req *desc)
  70. {
  71. void *buf;
  72. int ret;
  73. buf = kmalloc(sizeof(*desc), GFP_KERNEL);
  74. if (!buf)
  75. return -ENOMEM;
  76. ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc));
  77. memcpy(desc, buf, sizeof(*desc));
  78. kfree(buf);
  79. if (ret < 0)
  80. return ret;
  81. if (ret != sizeof(*desc))
  82. return -EIO;
  83. if (desc->magic != le32_to_cpu(GUD_DISPLAY_MAGIC))
  84. return -ENODATA;
  85. DRM_DEV_DEBUG_DRIVER(&intf->dev,
  86. "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n",
  87. desc->version, le32_to_cpu(desc->flags), desc->compression,
  88. le32_to_cpu(desc->max_buffer_size));
  89. if (!desc->version || !desc->max_width || !desc->max_height ||
  90. le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) ||
  91. le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height))
  92. return -EINVAL;
  93. return 0;
  94. }
  95. static int gud_status_to_errno(u8 status)
  96. {
  97. switch (status) {
  98. case GUD_STATUS_OK:
  99. return 0;
  100. case GUD_STATUS_BUSY:
  101. return -EBUSY;
  102. case GUD_STATUS_REQUEST_NOT_SUPPORTED:
  103. return -EOPNOTSUPP;
  104. case GUD_STATUS_PROTOCOL_ERROR:
  105. return -EPROTO;
  106. case GUD_STATUS_INVALID_PARAMETER:
  107. return -EINVAL;
  108. case GUD_STATUS_ERROR:
  109. return -EREMOTEIO;
  110. default:
  111. return -EREMOTEIO;
  112. }
  113. }
  114. static int gud_usb_get_status(struct usb_interface *intf)
  115. {
  116. int ret, status = -EIO;
  117. u8 *buf;
  118. buf = kmalloc(sizeof(*buf), GFP_KERNEL);
  119. if (!buf)
  120. return -ENOMEM;
  121. ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf));
  122. if (ret == sizeof(*buf))
  123. status = gud_status_to_errno(*buf);
  124. kfree(buf);
  125. if (ret < 0)
  126. return ret;
  127. return status;
  128. }
/*
 * Core control-transfer path shared by gud_usb_get() and gud_usb_set().
 *
 * Serializes transfers with ctrl_lock and guards against a disappearing
 * device with drm_dev_enter(). On a stalled transfer (-EPIPE), or after
 * every successful SET when the device advertises
 * GUD_DISPLAY_FLAG_STATUS_ON_SET, the device status request is issued to
 * get the real outcome.
 *
 * Returns bytes transferred or a negative error code.
 */
static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index,
			    void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(gdrm->drm.dev);
	int idx, ret;

	drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n",
		in ? "get" : "set", request, index, len);

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return -ENODEV;

	mutex_lock(&gdrm->ctrl_lock);

	ret = gud_usb_control_msg(intf, in, request, index, buf, len);
	if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) {
		int status;

		status = gud_usb_get_status(intf);
		if (status < 0) {
			/* Device-reported error wins over the transfer result */
			ret = status;
		} else if (ret < 0) {
			/* Stall but the device claims OK: report a protocol error */
			dev_err_once(gdrm->drm.dev,
				     "Unexpected status OK for failed transfer\n");
			ret = -EPIPE;
		}
	}

	if (ret < 0) {
		drm_dbg(&gdrm->drm, "ret=%d\n", ret);
		gdrm->stats_num_errors++;
	}

	mutex_unlock(&gdrm->ctrl_lock);
	drm_dev_exit(idx);

	return ret;
}
/*
 * Issue a GET control request to the device.
 * @buf cannot be allocated on the stack (passed straight to the USB core).
 * Returns number of bytes received or negative error code on failure.
 */
int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len)
{
	return gud_usb_transfer(gdrm, true, request, index, buf, max_len);
}
  167. /*
  168. * @buf can be allocated on the stack or NULL.
  169. * Returns zero on success or negative error code on failure.
  170. */
  171. int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len)
  172. {
  173. void *trbuf = NULL;
  174. int ret;
  175. if (buf && len) {
  176. trbuf = kmemdup(buf, len, GFP_KERNEL);
  177. if (!trbuf)
  178. return -ENOMEM;
  179. }
  180. ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len);
  181. kfree(trbuf);
  182. if (ret < 0)
  183. return ret;
  184. return ret != len ? -EIO : 0;
  185. }
  186. /*
  187. * @val can be allocated on the stack.
  188. * Returns zero on success or negative error code on failure.
  189. */
  190. int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val)
  191. {
  192. u8 *buf;
  193. int ret;
  194. buf = kmalloc(sizeof(*val), GFP_KERNEL);
  195. if (!buf)
  196. return -ENOMEM;
  197. ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val));
  198. *val = *buf;
  199. kfree(buf);
  200. if (ret < 0)
  201. return ret;
  202. return ret != sizeof(*val) ? -EIO : 0;
  203. }
/*
 * Write a single byte to the device (gud_usb_set() handles the heap bounce).
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
{
	return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}
/*
 * Fetch the device's connector-independent property list and create the
 * matching DRM plane properties. Unknown properties from newer devices are
 * skipped. Recognized property ids are recorded in gdrm->properties so the
 * commit path can send values back to the device.
 *
 * Returns zero on success (including an empty list) or a negative error code.
 */
static int gud_get_properties(struct gud_device *gdrm)
{
	struct gud_property_req *properties;
	unsigned int i, num_properties;
	int ret;

	properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
	if (!properties)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0,
			  properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties));
	/* ret <= 0: transfer error or no properties at all; ret is the result either way */
	if (ret <= 0)
		goto out;
	/* A partial record means the device is speaking a different protocol */
	if (ret % sizeof(*properties)) {
		ret = -EIO;
		goto out;
	}

	num_properties = ret / sizeof(*properties);
	ret = 0;

	gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties),
					GFP_KERNEL);
	if (!gdrm->properties) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_properties; i++) {
		u16 prop = le16_to_cpu(properties[i].prop);
		u64 val = le64_to_cpu(properties[i].val);

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/*
			 * DRM UAPI matches the protocol so use the value directly,
			 * but mask out any additions on future devices.
			 */
			val &= GUD_ROTATION_MASK;
			ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
								 DRM_MODE_ROTATE_0, val);
			break;
		default:
			/* New ones might show up in future devices, skip those we don't know. */
			drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop);
			continue;
		}

		if (ret)
			goto out;

		gdrm->properties[gdrm->num_properties++] = prop;
	}
out:
	kfree(properties);

	return ret;
}
/*
 * FIXME: Dma-buf sharing requires DMA support by the importing device.
 * This function is a workaround to make USB devices work as well.
 * See todo.rst for how to fix the issue in the dma-buf framework.
 *
 * Imports through the DMA-capable parent device obtained in gud_probe();
 * without one (dmadev == NULL) import is refused.
 */
static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
{
	struct gud_device *gdrm = to_gud_device(drm);

	if (!gdrm->dmadev)
		return ERR_PTR(-ENODEV);

	return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
}
/*
 * debugfs "stats" file: buffer size, error count, compression mode and the
 * running compression ratio (total bytes / bytes actually sent).
 */
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct gud_device *gdrm = to_gud_device(node->minor->dev);
	char buf[10];

	string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
	seq_printf(m, "Max buffer size: %s\n", buf);
	seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors);

	seq_puts(m, "Compression: ");
	if (gdrm->compression & GUD_COMPRESSION_LZ4)
		seq_puts(m, " lz4");
	if (!gdrm->compression)
		seq_puts(m, " none");
	seq_puts(m, "\n");

	if (gdrm->compression) {
		/* Ratio printed with one manually computed fractional digit */
		u64 remainder;
		u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length,
					  &remainder);
		u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length);

		seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac);
	}

	return 0;
}
/* debugfs entries registered by gud_debugfs_init() */
static const struct drm_info_list gud_debugfs_list[] = {
	{ "stats", gud_stats_debugfs, 0, NULL },
};
/* DRM driver debugfs_init hook: create the stats file under the minor's root */
static void gud_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
				 minor->debugfs_root, minor);
}
/* Display pipe hooks; implementations live in gud_pipe.c */
static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
	.check = gud_pipe_check,
	.update = gud_pipe_update,
};
/* Stock atomic helpers; framebuffers get damage tracking via _with_dirty */
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/* Only linear buffers are supported (terminated with INVALID as required) */
static const u64 gud_pipe_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
DEFINE_DRM_GEM_FOPS(gud_fops);

/* DRM driver description: shmem GEM with custom prime import for USB DMA */
static const struct drm_driver gud_drm_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	.fops			= &gud_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.gem_prime_import	= gud_gem_prime_import,
	.debugfs_init		= gud_debugfs_init,

	.name			= "gud",
	.desc			= "Generic USB Display",
	.date			= "20200422",
	.major			= 1,
	.minor			= 0,
};
  328. static int gud_alloc_bulk_buffer(struct gud_device *gdrm)
  329. {
  330. unsigned int i, num_pages;
  331. struct page **pages;
  332. void *ptr;
  333. int ret;
  334. gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len);
  335. if (!gdrm->bulk_buf)
  336. return -ENOMEM;
  337. num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE);
  338. pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
  339. if (!pages)
  340. return -ENOMEM;
  341. for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
  342. pages[i] = vmalloc_to_page(ptr);
  343. ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages,
  344. 0, gdrm->bulk_len, GFP_KERNEL);
  345. kfree(pages);
  346. return ret;
  347. }
/*
 * devm action: release everything gud_probe() set up by hand — the
 * compression buffer, the bulk sg-table and buffer, and the ctrl mutex.
 * Pointers are NULLed so a later flush cannot touch freed memory.
 */
static void gud_free_buffers_and_mutex(void *data)
{
	struct gud_device *gdrm = data;

	vfree(gdrm->compress_buf);
	gdrm->compress_buf = NULL;
	sg_free_table(&gdrm->bulk_sgt);
	vfree(gdrm->bulk_buf);
	gdrm->bulk_buf = NULL;
	mutex_destroy(&gdrm->ctrl_lock);
}
/*
 * USB probe: identify a GUD display interface, negotiate pixel formats,
 * allocate transfer buffers and register the DRM device.
 *
 * Rough sequence: find the bulk-out endpoint, fetch and validate the display
 * descriptor, set up mode config limits, query the device format list
 * (picking an emulation format for XRGB8888 if the device lacks it), size and
 * allocate the bulk buffer, init the display pipe, then fetch properties and
 * connectors before registering.
 */
static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct drm_format_info *xrgb8888_emulation_format = NULL;
	bool rgb565_supported = false, xrgb8888_supported = false;
	unsigned int num_formats_dev, num_formats = 0;
	struct usb_endpoint_descriptor *bulk_out;
	struct gud_display_descriptor_req desc;
	struct device *dev = &intf->dev;
	size_t max_buffer_size = 0;
	struct gud_device *gdrm;
	struct drm_device *drm;
	u8 *formats_dev;
	u32 *formats;
	int ret, i;

	ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out);
	if (ret)
		return ret;

	ret = gud_get_display_descriptor(intf, &desc);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret);
		return -ENODEV;
	}

	if (desc.version > 1) {
		dev_err(dev, "Protocol version %u is not supported\n", desc.version);
		return -ENODEV;
	}

	gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm);
	if (IS_ERR(gdrm))
		return PTR_ERR(gdrm);

	drm = &gdrm->drm;
	drm->mode_config.funcs = &gud_mode_config_funcs;
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	gdrm->flags = le32_to_cpu(desc.flags);
	gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;

	/* Full-update mode and compression are mutually exclusive in the protocol */
	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression)
		return -EINVAL;

	mutex_init(&gdrm->ctrl_lock);
	mutex_init(&gdrm->damage_lock);
	INIT_WORK(&gdrm->work, gud_flush_work);
	gud_clear_damage(gdrm);

	/* Register cleanup before any buffer allocation so partial init is undone */
	ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm);
	if (ret)
		return ret;

	drm->mode_config.min_width = le32_to_cpu(desc.min_width);
	drm->mode_config.max_width = le32_to_cpu(desc.max_width);
	drm->mode_config.min_height = le32_to_cpu(desc.min_height);
	drm->mode_config.max_height = le32_to_cpu(desc.max_height);

	formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
	/* Add room for emulated XRGB8888 */
	formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
	if (!formats_dev || !formats)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM);
	if (ret < 0)
		return ret;

	num_formats_dev = ret;
	for (i = 0; i < num_formats_dev; i++) {
		const struct drm_format_info *info;
		size_t fmt_buf_size;
		u32 format;

		format = gud_to_fourcc(formats_dev[i]);
		if (!format) {
			drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]);
			continue;
		}

		/* The internal formats have no core drm_format_info */
		if (format == GUD_DRM_FORMAT_R1)
			info = &gud_drm_format_r1;
		else if (format == GUD_DRM_FORMAT_XRGB1111)
			info = &gud_drm_format_xrgb1111;
		else
			info = drm_format_info(format);

		/* Track candidates for XRGB8888 emulation; later cases take priority */
		switch (format) {
		case GUD_DRM_FORMAT_R1:
			fallthrough;
		case DRM_FORMAT_R8:
			fallthrough;
		case GUD_DRM_FORMAT_XRGB1111:
			fallthrough;
		case DRM_FORMAT_RGB332:
			fallthrough;
		case DRM_FORMAT_RGB888:
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_RGB565:
			rgb565_supported = true;
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_XRGB8888:
			xrgb8888_supported = true;
			break;
		}

		/* Worst-case framebuffer size for this format at max resolution */
		fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) *
			       drm->mode_config.max_height;
		max_buffer_size = max(max_buffer_size, fmt_buf_size);

		if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111)
			continue; /* Internal not for userspace */

		formats[num_formats++] = format;
	}

	if (!num_formats && !xrgb8888_emulation_format) {
		dev_err(dev, "No supported pixel formats found\n");
		return -EINVAL;
	}

	/* Prefer speed over color depth */
	if (rgb565_supported)
		drm->mode_config.preferred_depth = 16;

	if (!xrgb8888_supported && xrgb8888_emulation_format) {
		gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format;
		formats[num_formats++] = DRM_FORMAT_XRGB8888;
	}

	/* The device can cap the transfer size below the worst-case estimate */
	if (desc.max_buffer_size)
		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
	/* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */
	if (max_buffer_size > SZ_64M)
		max_buffer_size = SZ_64M;

	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
	gdrm->bulk_len = max_buffer_size;

	ret = gud_alloc_bulk_buffer(gdrm);
	if (ret)
		return ret;

	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
		if (!gdrm->lz4_comp_mem)
			return -ENOMEM;

		gdrm->compress_buf = vmalloc(gdrm->bulk_len);
		if (!gdrm->compress_buf)
			return -ENOMEM;
	}

	ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
					   formats, num_formats,
					   gud_pipe_modifiers, NULL);
	if (ret)
		return ret;

	/* The pipe has copied the format list; the temporaries can go early */
	devm_kfree(dev, formats);
	devm_kfree(dev, formats_dev);

	ret = gud_get_properties(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get properties (error=%d)\n", ret);
		return ret;
	}

	drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);

	ret = gud_get_connectors(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get connectors (error=%d)\n", ret);
		return ret;
	}

	drm_mode_config_reset(drm);

	usb_set_intfdata(intf, gdrm);

	/* Needed for dma-buf import; absence only disables buffer sharing */
	gdrm->dmadev = usb_intf_get_dma_device(intf);
	if (!gdrm->dmadev)
		dev_warn(dev, "buffer sharing not supported");

	ret = drm_dev_register(drm, 0);
	if (ret) {
		put_device(gdrm->dmadev);
		return ret;
	}

	drm_kms_helper_poll_init(drm);

	drm_fbdev_generic_setup(drm, 0);

	return 0;
}
/*
 * USB disconnect: stop polling, mark the DRM device unplugged so new
 * ioctls/transfers fail fast, shut down the display and drop the DMA
 * device reference taken in gud_probe().
 */
static void gud_disconnect(struct usb_interface *interface)
{
	struct gud_device *gdrm = usb_get_intfdata(interface);
	struct drm_device *drm = &gdrm->drm;

	drm_dbg(drm, "%s:\n", __func__);

	drm_kms_helper_poll_fini(drm);
	drm_dev_unplug(drm);
	drm_atomic_helper_shutdown(drm);
	put_device(gdrm->dmadev);
	gdrm->dmadev = NULL;
}
/* USB suspend: disable the display via the mode config suspend helper */
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	return drm_mode_config_helper_suspend(&gdrm->drm);
}
/*
 * USB resume (also used for reset_resume): restore the display state.
 * The helper's return value is ignored; resume always reports success.
 */
static int gud_resume(struct usb_interface *intf)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	drm_mode_config_helper_resume(&gdrm->drm);

	return 0;
}
/* Matched vendor-specific interfaces (OpenMoko and MCS Electronics ids) */
static const struct usb_device_id gud_id_table[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) },
	{ USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) },
	{ }
};

MODULE_DEVICE_TABLE(usb, gud_id_table);
/* USB driver glue; gud_resume handles both resume and reset_resume */
static struct usb_driver gud_usb_driver = {
	.name		= "gud",
	.probe		= gud_probe,
	.disconnect	= gud_disconnect,
	.id_table	= gud_id_table,
	.suspend	= gud_suspend,
	.resume		= gud_resume,
	.reset_resume	= gud_resume,
};

module_usb_driver(gud_usb_driver);

MODULE_AUTHOR("Noralf Trønnes");
MODULE_LICENSE("Dual MIT/GPL");