/* virtgpu_kms.c */
  1. /*
  2. * Copyright (C) 2015 Red Hat, Inc.
  3. * All Rights Reserved.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining
  6. * a copy of this software and associated documentation files (the
  7. * "Software"), to deal in the Software without restriction, including
  8. * without limitation the rights to use, copy, modify, merge, publish,
  9. * distribute, sublicense, and/or sell copies of the Software, and to
  10. * permit persons to whom the Software is furnished to do so, subject to
  11. * the following conditions:
  12. *
  13. * The above copyright notice and this permission notice (including the
  14. * next paragraph) shall be included in all copies or substantial
  15. * portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21. * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22. * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23. * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. */
  25. #include <linux/virtio.h>
  26. #include <linux/virtio_config.h>
  27. #include <linux/virtio_ring.h>
  28. #include <drm/drm_file.h>
  29. #include <drm/drm_managed.h>
  30. #include "virtgpu_drv.h"
/*
 * Deferred handler for virtio config-change notifications.
 *
 * Reads the events_read field of the device config space; if a display
 * event is pending, re-queries EDID (when supported) and display info,
 * notifies the host, and raises a hotplug event so userspace re-probes
 * the connectors.  Handled events are acknowledged by writing them back
 * to events_clear.
 */
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	/* ack only the events we actually handled */
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}
/*
 * virtio_gpu_init_vq - initialize the driver-side state of one virtqueue
 * @vgvq: queue wrapper (lock, ack wait queue, dequeue worker)
 * @work_func: worker used to drain completed buffers off this queue
 *
 * Note: the struct virtqueue itself (vgvq->vq) is assigned later, after
 * virtio_find_vqs() in virtio_gpu_init().
 */
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}
  58. static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
  59. int num_capsets)
  60. {
  61. int i, ret;
  62. bool invalid_capset_id = false;
  63. struct drm_device *drm = vgdev->ddev;
  64. vgdev->capsets = drmm_kcalloc(drm, num_capsets,
  65. sizeof(struct virtio_gpu_drv_capset),
  66. GFP_KERNEL);
  67. if (!vgdev->capsets) {
  68. DRM_ERROR("failed to allocate cap sets\n");
  69. return;
  70. }
  71. for (i = 0; i < num_capsets; i++) {
  72. virtio_gpu_cmd_get_capset_info(vgdev, i);
  73. virtio_gpu_notify(vgdev);
  74. ret = wait_event_timeout(vgdev->resp_wq,
  75. vgdev->capsets[i].id > 0, 5 * HZ);
  76. /*
  77. * Capability ids are defined in the virtio-gpu spec and are
  78. * between 1 to 63, inclusive.
  79. */
  80. if (!vgdev->capsets[i].id ||
  81. vgdev->capsets[i].id > MAX_CAPSET_ID)
  82. invalid_capset_id = true;
  83. if (ret == 0)
  84. DRM_ERROR("timed out waiting for cap set %d\n", i);
  85. else if (invalid_capset_id)
  86. DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
  87. if (ret == 0 || invalid_capset_id) {
  88. spin_lock(&vgdev->display_info_lock);
  89. drmm_kfree(drm, vgdev->capsets);
  90. vgdev->capsets = NULL;
  91. spin_unlock(&vgdev->display_info_lock);
  92. return;
  93. }
  94. vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id;
  95. DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
  96. i, vgdev->capsets[i].id,
  97. vgdev->capsets[i].max_version,
  98. vgdev->capsets[i].max_size);
  99. }
  100. vgdev->num_capsets = num_capsets;
  101. }
  102. int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
  103. {
  104. static vq_callback_t *callbacks[] = {
  105. virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
  106. };
  107. static const char * const names[] = { "control", "cursor" };
  108. struct virtio_gpu_device *vgdev;
  109. /* this will expand later */
  110. struct virtqueue *vqs[2];
  111. u32 num_scanouts, num_capsets;
  112. int ret = 0;
  113. if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
  114. return -ENODEV;
  115. vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
  116. if (!vgdev)
  117. return -ENOMEM;
  118. vgdev->ddev = dev;
  119. dev->dev_private = vgdev;
  120. vgdev->vdev = vdev;
  121. spin_lock_init(&vgdev->display_info_lock);
  122. spin_lock_init(&vgdev->resource_export_lock);
  123. spin_lock_init(&vgdev->host_visible_lock);
  124. ida_init(&vgdev->ctx_id_ida);
  125. ida_init(&vgdev->resource_ida);
  126. init_waitqueue_head(&vgdev->resp_wq);
  127. virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
  128. virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
  129. vgdev->fence_drv.context = dma_fence_context_alloc(1);
  130. spin_lock_init(&vgdev->fence_drv.lock);
  131. INIT_LIST_HEAD(&vgdev->fence_drv.fences);
  132. INIT_LIST_HEAD(&vgdev->cap_cache);
  133. INIT_WORK(&vgdev->config_changed_work,
  134. virtio_gpu_config_changed_work_func);
  135. INIT_WORK(&vgdev->obj_free_work,
  136. virtio_gpu_array_put_free_work);
  137. INIT_LIST_HEAD(&vgdev->obj_free_list);
  138. spin_lock_init(&vgdev->obj_free_lock);
  139. #ifdef __LITTLE_ENDIAN
  140. if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
  141. vgdev->has_virgl_3d = true;
  142. #endif
  143. if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
  144. vgdev->has_edid = true;
  145. }
  146. if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
  147. vgdev->has_indirect = true;
  148. }
  149. if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
  150. vgdev->has_resource_assign_uuid = true;
  151. }
  152. if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
  153. vgdev->has_resource_blob = true;
  154. }
  155. if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
  156. VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
  157. if (!devm_request_mem_region(&vgdev->vdev->dev,
  158. vgdev->host_visible_region.addr,
  159. vgdev->host_visible_region.len,
  160. dev_name(&vgdev->vdev->dev))) {
  161. DRM_ERROR("Could not reserve host visible region\n");
  162. ret = -EBUSY;
  163. goto err_vqs;
  164. }
  165. DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
  166. (unsigned long)vgdev->host_visible_region.addr,
  167. (unsigned long)vgdev->host_visible_region.len);
  168. vgdev->has_host_visible = true;
  169. drm_mm_init(&vgdev->host_visible_mm,
  170. (unsigned long)vgdev->host_visible_region.addr,
  171. (unsigned long)vgdev->host_visible_region.len);
  172. }
  173. if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
  174. vgdev->has_context_init = true;
  175. }
  176. DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
  177. vgdev->has_virgl_3d ? '+' : '-',
  178. vgdev->has_edid ? '+' : '-',
  179. vgdev->has_resource_blob ? '+' : '-',
  180. vgdev->has_host_visible ? '+' : '-');
  181. DRM_INFO("features: %ccontext_init\n",
  182. vgdev->has_context_init ? '+' : '-');
  183. ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
  184. if (ret) {
  185. DRM_ERROR("failed to find virt queues\n");
  186. goto err_vqs;
  187. }
  188. vgdev->ctrlq.vq = vqs[0];
  189. vgdev->cursorq.vq = vqs[1];
  190. ret = virtio_gpu_alloc_vbufs(vgdev);
  191. if (ret) {
  192. DRM_ERROR("failed to alloc vbufs\n");
  193. goto err_vbufs;
  194. }
  195. /* get display info */
  196. virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
  197. num_scanouts, &num_scanouts);
  198. vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
  199. VIRTIO_GPU_MAX_SCANOUTS);
  200. if (!vgdev->num_scanouts) {
  201. DRM_ERROR("num_scanouts is zero\n");
  202. ret = -EINVAL;
  203. goto err_scanouts;
  204. }
  205. DRM_INFO("number of scanouts: %d\n", num_scanouts);
  206. virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
  207. num_capsets, &num_capsets);
  208. DRM_INFO("number of cap sets: %d\n", num_capsets);
  209. ret = virtio_gpu_modeset_init(vgdev);
  210. if (ret) {
  211. DRM_ERROR("modeset init failed\n");
  212. goto err_scanouts;
  213. }
  214. virtio_device_ready(vgdev->vdev);
  215. if (num_capsets)
  216. virtio_gpu_get_capsets(vgdev, num_capsets);
  217. if (vgdev->has_edid)
  218. virtio_gpu_cmd_get_edids(vgdev);
  219. virtio_gpu_cmd_get_display_info(vgdev);
  220. virtio_gpu_notify(vgdev);
  221. wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
  222. 5 * HZ);
  223. return 0;
  224. err_scanouts:
  225. virtio_gpu_free_vbufs(vgdev);
  226. err_vbufs:
  227. vgdev->vdev->config->del_vqs(vgdev->vdev);
  228. err_vqs:
  229. dev->dev_private = NULL;
  230. return ret;
  231. }
  232. static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
  233. {
  234. struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;
  235. list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
  236. kfree(cache_ent->caps_cache);
  237. kfree(cache_ent);
  238. }
  239. }
/*
 * virtio_gpu_deinit - device teardown (driver remove path)
 *
 * Flushes all deferred work so no worker still touches the virtqueues,
 * then resets the device (stopping interrupts/DMA) before deleting the
 * queues.  The vgdev itself is drm-managed and freed with the drm device.
 */
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	/* quiesce the device first, then tear down the vqs */
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}
/*
 * virtio_gpu_release - final drm_device release callback
 *
 * dev->dev_private is NULL when virtio_gpu_init() failed early (its
 * error path clears it), so bail out in that case.
 */
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	/* host_visible_mm was only set up when the shm region was present */
	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);
}
  261. int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
  262. {
  263. struct virtio_gpu_device *vgdev = dev->dev_private;
  264. struct virtio_gpu_fpriv *vfpriv;
  265. int handle;
  266. /* can't create contexts without 3d renderer */
  267. if (!vgdev->has_virgl_3d)
  268. return 0;
  269. /* allocate a virt GPU context for this opener */
  270. vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
  271. if (!vfpriv)
  272. return -ENOMEM;
  273. mutex_init(&vfpriv->context_lock);
  274. handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
  275. if (handle < 0) {
  276. kfree(vfpriv);
  277. return handle;
  278. }
  279. vfpriv->ctx_id = handle + 1;
  280. file->driver_priv = vfpriv;
  281. return 0;
  282. }
/*
 * virtio_gpu_driver_postclose - drm file close hook
 *
 * Destroys the host-side 3d context if one was actually created, then
 * releases the id and private data set up in virtio_gpu_driver_open().
 */
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	/* open() only allocates driver_priv when 3d is available */
	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	/* ctx_id is the ida id + 1 (see virtio_gpu_driver_open) */
	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}