Merge tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "The biggest thing in this is the AMD Navi GPU support, this again
  contains a bunch of header files that are large. These are the new AMD
  RX5700 GPUs that just recently became available.

  New drivers:
   - ST-Ericsson MCDE driver
   - Ingenic JZ47xx SoC

  UAPI change:
   - HDR source metadata property (see the userspace sketch right
     after this changelog)

  Core:
   - HDR infoframes and EDID parsing
   - drm hdmi infoframe unpacking
   - remove prime sg_table caching into dma-buf
   - New gem vram helpers to reduce driver code
   - Lots of drmP.h removal
   - reservation fencing fix
   - documentation updates
   - drm_fb_helper_connector removed
   - mode name command handler rewrite

  fbcon:
   - Remove the fbcon notifiers

  ttm:
   - forward progress fixes

  dma-buf:
   - make mmap call optional
   - debugfs refcount fixes
   - dma-fence free with pending signals fix
   - each dma-buf gets an inode

  Panels:
   - Lots of additional panel bindings

  amdgpu:
   - initial navi10 support
   - avoid hw reset
   - HDR metadata support
   - new thermal sensors for vega asics
   - RAS fixes
   - use HMM rather than MMU notifier
   - xgmi topology via kfd
   - SR-IOV fixes
   - driver reload fixes
   - DC use of a core bpc attribute
   - Aux fixes for DC
   - Bandwidth calc updates for DC
   - Clock handling refactor
   - kfd VEGAM support

  vmwgfx:
   - Coherent memory support changes

  i915:
   - HDR Support
   - HDMI i2c link
   - Icelake multi-segmented gamma support
   - GuC firmware update
   - Mule Creek Canyon PCH support for EHL
   - EHL platform updates
   - move i915.alpha_support to i915.force_probe
   - runtime PM refactoring
   - VBT parsing refactoring
   - DSI fixes
   - struct mutex dependency reduction
   - GEM code reorg

  mali-dp:
   - Komeda driver features

  msm:
   - dsi vs EPROBE_DEFER fixes
   - msm8998 snapdragon 835 support
   - a540 gpu support
   - mdp5 and dpu interconnect support

  exynos:
   - drmP.h removal

  tegra:
   - misc fixes

  tda998x:
   - audio support improvements
   - pixel repeated mode support
   - quantisation range handling corrections
   - HDMI vendor info fix

  armada:
   - interlace support fix
   - overlay/video plane register handling refactor
   - add gamma support

  rockchip:
   - RK3328 support

  panfrost:
   - expose perf counters via hidden ioctls

  vkms:
   - enumerate CRC sources list

  ast:
   - rework BO handling

  mgag200:
   - rework BO handling

  dw-hdmi:
   - suspend/resume support

  rcar-du:
   - R8A774A1 SoC support
   - LVDS dual-link mode support
   - Additional formats
   - Misc fixes

  omapdrm:
   - DSI command mode display support

  stm:
   - fb modifier support
   - runtime PM support

  sun4i:
   - use vmap ops

  vc4:
   - binner bo binding rework

  v3d:
   - compute shader support
   - resync/sync fixes
   - job management refactoring

  lima:
   - NULL pointer in irq handler fix
   - scheduler default timeout

  virtio:
   - fence seqno support
   - trace events

  bochs:
   - misc fixes

  tc358767:
   - IRQ/HPD handling

  sii902x:
   - HDMI audio support

  atmel-hlcdc:
   - misc fixes

  meson:
   - zpos support"

* tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm: (1815 commits)
  Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
  Revert "mm: adjust apply_to_pfn_range interface for dropped token."
  mm: adjust apply_to_pfn_range interface for dropped token.
  drm/amdgpu/navi10: add uclk activity sensor
  drm/amdgpu: properly guard the generic discovery code
  drm/amdgpu: add missing documentation on new module parameters
  drm/amdgpu: don't invalidate caches in RELEASE_MEM, only do the writeback
  drm/amd/display: avoid 64-bit division
  drm/amdgpu/psp11: simplify the ucode register logic
  drm/amdgpu: properly guard DC support in navi code
  drm/amd/powerplay: vega20: fix uninitialized variable use
  drm/amd/display: dcn20: include linux/delay.h
  amdgpu: make pmu support optional
  drm/amd/powerplay: Zero initialize current_rpm in vega20_get_fan_speed_percent
  drm/amd/powerplay: Zero initialize freq in smu_v11_0_get_current_clk_freq
  drm/amd/powerplay: Use memset to initialize metrics structs
  drm/amdgpu/mes10.1: Fix header guard
  drm/amd/powerplay: add temperature sensor support for navi10
  drm/amdgpu: fix scheduler timeout calc
  drm/amdgpu: Prepare for hmm_range_register API change (v2)
  ...
Committed by Linus Torvalds, 2019-07-15 19:04:27 -07:00
1567 changed files with 475768 additions and 34599 deletions

diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile

@@ -4,8 +4,8 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
-virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
-virtgpu_ioctl.o virtgpu_prime.o
+virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o

diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c

@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_damage_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@@ -49,23 +50,10 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static int
-virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
-struct drm_file *file_priv,
-unsigned int flags, unsigned int color,
-struct drm_clip_rect *clips,
-unsigned int num_clips)
-{
-struct virtio_gpu_framebuffer *virtio_gpu_fb
-= to_virtio_gpu_framebuffer(fb);
-return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
-}
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
-.dirty = virtio_gpu_framebuffer_surface_dirty,
+.dirty = drm_atomic_helper_dirtyfb,
};
int
@@ -85,10 +73,6 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = NULL;
return ret;
}
-spin_lock_init(&vgfb->dirty_lock);
-vgfb->x1 = vgfb->y1 = INT_MAX;
-vgfb->x2 = vgfb->y2 = 0;
return 0;
}
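
The hand-rolled dirty tracking removed above is replaced by the core
damage helpers: .dirty now points at drm_atomic_helper_dirtyfb(), which
turns framebuffer-dirty calls into atomic commits carrying plane damage
(hence the new drm_damage_helper.h include). For orientation, a hedged
sketch of the consuming side; the function is illustrative, not this
driver's actual plane-update code:

#include <drm/drm_damage_helper.h>
#include <drm/drm_rect.h>

/* Illustrative plane update walking the accumulated damage rects. */
static void example_plane_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_state)
{
    struct drm_atomic_helper_damage_iter iter;
    struct drm_rect rect;

    drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
    drm_atomic_for_each_plane_damage(&iter, &rect) {
        /* flush only rect.x1/y1 .. rect.x2/y2 to the device */
    }
}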

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h

@@ -102,7 +102,6 @@ struct virtio_gpu_fence {
struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
-uint64_t seq;
};
#define to_virtio_fence(x) \
container_of(x, struct virtio_gpu_fence, f)
@@ -143,9 +142,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
-int x1, y1, x2, y2; /* dirty rect */
-spinlock_t dirty_lock;
uint32_t hw_res_handle;
struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
@@ -255,10 +251,6 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/* virtio_fb */
-int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
-struct drm_clip_rect *clips,
-unsigned int num_clips);
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
@@ -356,7 +348,7 @@ int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
-int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,

diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
deleted file mode 100644

@@ -1,150 +0,0 @@
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
bool store, int x, int y,
int width, int height)
{
struct drm_device *dev = fb->base.dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
bool store_for_later = false;
int bpp = fb->base.format->cpp[0];
int x2, y2;
unsigned long flags;
struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
if ((width <= 0) ||
(x + width > fb->base.width) ||
(y + height > fb->base.height)) {
DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
width, height, x, y,
fb->base.width, fb->base.height);
return -EINVAL;
}
/*
* Can be called with pretty much any context (console output
* path). If we are in atomic just store the dirty rect info
* to send out the update later.
*
* Can't test inside spin lock.
*/
if (in_atomic() || store)
store_for_later = true;
x2 = x + width - 1;
y2 = y + height - 1;
spin_lock_irqsave(&fb->dirty_lock, flags);
if (fb->y1 < y)
y = fb->y1;
if (fb->y2 > y2)
y2 = fb->y2;
if (fb->x1 < x)
x = fb->x1;
if (fb->x2 > x2)
x2 = fb->x2;
if (store_for_later) {
fb->x1 = x;
fb->x2 = x2;
fb->y1 = y;
fb->y2 = y2;
spin_unlock_irqrestore(&fb->dirty_lock, flags);
return 0;
}
fb->x1 = fb->y1 = INT_MAX;
fb->x2 = fb->y2 = 0;
spin_unlock_irqrestore(&fb->dirty_lock, flags);
{
uint32_t offset;
uint32_t w = x2 - x + 1;
uint32_t h = y2 - y + 1;
offset = (y * fb->base.pitches[0]) + x * bpp;
virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj,
offset,
cpu_to_le32(w),
cpu_to_le32(h),
cpu_to_le32(x),
cpu_to_le32(y),
NULL);
}
virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
x, y, x2 - x + 1, y2 - y + 1);
return 0;
}
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
struct drm_clip_rect *clips,
unsigned int num_clips)
{
struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
struct drm_clip_rect norect;
struct drm_clip_rect *clips_ptr;
int left, right, top, bottom;
int i;
int inc = 1;
if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
norect.x2 = vgfb->base.width;
norect.y2 = vgfb->base.height;
}
left = clips->x1;
right = clips->x2;
top = clips->y1;
bottom = clips->y2;
/* skip the first clip rect */
for (i = 1, clips_ptr = clips + inc;
i < num_clips; i++, clips_ptr += inc) {
left = min_t(int, left, (int)clips_ptr->x1);
right = max_t(int, right, (int)clips_ptr->x2);
top = min_t(int, top, (int)clips_ptr->y1);
bottom = max_t(int, bottom, (int)clips_ptr->y2);
}
if (obj->dumb)
return virtio_gpu_dirty_update(vgfb, false, left, top,
right - left, bottom - top);
virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
left, top, right - left, bottom - top);
return 0;
}

diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c

@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
+#include <trace/events/dma_fence.h>
#include "virtgpu_drv.h"
static const char *virtio_get_driver_name(struct dma_fence *f)
@@ -40,16 +41,14 @@ bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
-if (atomic64_read(&fence->drv->last_seq) >= fence->seq)
+if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
}
static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
-snprintf(str, size, "%llu", fence->seq);
+snprintf(str, size, "%llu", f->seqno);
}
static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
@@ -71,17 +70,22 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
-GFP_ATOMIC);
+GFP_KERNEL);
if (!fence)
return fence;
fence->drv = drv;
+/* This only partially initializes the fence because the seqno is
+* unknown yet. The fence must not be used outside of the driver
+* until virtio_gpu_fence_emit is called.
+*/
dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
return fence;
}
-int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence)
{
@@ -89,14 +93,15 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
-fence->seq = ++drv->sync_seq;
+fence->f.seqno = ++drv->sync_seq;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
+trace_dma_fence_emit(&fence->f);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-cmd_hdr->fence_id = cpu_to_le64(fence->seq);
-return 0;
+cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -109,7 +114,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
-if (last_seq < fence->seq)
+if (last_seq < fence->f.seqno)
continue;
dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
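
Net effect of the fence changes above: the driver-private seq field is
folded into the shared fence as f.seqno (so generic pieces like the
dma_fence tracepoint can see it), allocation may now sleep (GFP_KERNEL),
and emit can no longer fail. A hedged lifecycle sketch (vgdev and
cmd_hdr assumed, error paths elided):

struct virtio_gpu_fence *fence;

fence = virtio_gpu_fence_alloc(vgdev);    /* f.seqno is still 0 here */
if (!fence)
    return -ENOMEM;

/* Stamps f.seqno and cmd_hdr->fence_id under drv->lock; returns void
 * as of this change.
 */
virtio_gpu_fence_emit(vgdev, cmd_hdr, fence);

dma_fence_wait(&fence->f, true);          /* interruptible wait */
dma_fence_put(&fence->f);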

diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c

@@ -63,7 +63,7 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
struct virtio_gpu_object *qobj;
int ret;
-ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
if (ret != 0)
return ret;
@@ -168,7 +168,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
-user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
@@ -195,8 +195,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_free;
-buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
-exbuf->size);
+buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unresv;
@@ -263,10 +262,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
default:
return -EINVAL;
}
-if (copy_to_user((void __user *)(unsigned long)param->value,
-&value, sizeof(int))) {
+if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
return -EFAULT;
-}
return 0;
}
@@ -526,7 +524,6 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
if (cache_ent->id == args->cap_set_id &&
cache_ent->version == args->cap_set_ver) {
-ptr = cache_ent->caps_cache;
spin_unlock(&vgdev->display_info_lock);
goto copy_exit;
}
@@ -537,15 +534,18 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
&cache_ent);
-copy_exit:
ret = wait_event_timeout(vgdev->resp_wq,
atomic_read(&cache_ent->is_valid), 5 * HZ);
if (!ret)
return -EBUSY;
+/* is_valid check must proceed before copy of the cache entry. */
+smp_rmb();
+ptr = cache_ent->caps_cache;
+copy_exit:
-if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
return -EFAULT;
return 0;
@@ -553,34 +553,34 @@ copy_exit:
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
virtio_gpu_resource_create_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
/* make transfer async to the main ring? - no sure, can we
* thread these in the underlying GL
*/
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
virtio_gpu_transfer_from_host_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
virtio_gpu_transfer_to_host_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
-DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+DRM_AUTH | DRM_RENDER_ALLOW),
};
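
Two recurring cleanups in this file: DRM_UNLOCKED is dropped from the
ioctl flags (it is a no-op for non-legacy drivers), and open-coded casts
of u64 handles become u64_to_user_ptr(). The helper is roughly the
following sketch; the in-tree macro in linux/kernel.h additionally
type-checks that its argument really is a u64:

#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))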

diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
new file mode 100644

@@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_VIRTGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _VIRTGPU_TRACE_H_
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM virtio_gpu
#define TRACE_INCLUDE_FILE virtgpu_trace
DECLARE_EVENT_CLASS(virtio_gpu_cmd,
TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
TP_ARGS(vq, hdr),
TP_STRUCT__entry(
__field(int, dev)
__field(unsigned int, vq)
__field(const char *, name)
__field(u32, type)
__field(u32, flags)
__field(u64, fence_id)
__field(u32, ctx_id)
),
TP_fast_assign(
__entry->dev = vq->vdev->index;
__entry->vq = vq->index;
__entry->name = vq->name;
__entry->type = le32_to_cpu(hdr->type);
__entry->flags = le32_to_cpu(hdr->flags);
__entry->fence_id = le64_to_cpu(hdr->fence_id);
__entry->ctx_id = le32_to_cpu(hdr->ctx_id);
),
TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
__entry->dev, __entry->vq, __entry->name,
__entry->type, __entry->flags, __entry->fence_id,
__entry->ctx_id)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
TP_ARGS(vq, hdr)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
TP_ARGS(vq, hdr)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/virtio
#include <trace/define_trace.h>
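
DECLARE_EVENT_CLASS above defines the record layout and format string
once; each DEFINE_EVENT stamps out one tracepoint sharing that class.
Adding a third event would be a single extra stanza, e.g. (hypothetical,
not part of this merge):

DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_retry,
    TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
    TP_ARGS(vq, hdr)
);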

diff --git a/drivers/gpu/drm/virtio/virtgpu_trace_points.c b/drivers/gpu/drm/virtio/virtgpu_trace_points.c
new file mode 100644

@@ -0,0 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
#define CREATE_TRACE_POINTS
#include "virtgpu_trace.h"

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c

@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
@@ -192,6 +193,9 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
-if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
+if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
struct virtio_gpu_ctrl_hdr *cmd;
@@ -284,6 +288,9 @@ retry:
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
+trace_virtio_gpu_cmd_queue(vq,
+(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
virtqueue_kick(vq);
}
@@ -359,6 +366,9 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+trace_virtio_gpu_cmd_queue(vq,
+(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
virtqueue_kick(vq);
}
@@ -583,12 +593,14 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
cache_ent->id == le32_to_cpu(cmd->capset_id)) {
memcpy(cache_ent->caps_cache, resp->capset_data,
cache_ent->size);
+/* Copy must occur before is_valid is signalled. */
+smp_wmb();
atomic_set(&cache_ent->is_valid, 1);
break;
}
}
spin_unlock(&vgdev->display_info_lock);
-wake_up(&vgdev->resp_wq);
+wake_up_all(&vgdev->resp_wq);
}
static int virtio_get_edid_block(void *data, u8 *buf,
@@ -684,8 +696,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
int max_size;
struct virtio_gpu_drv_cap_cache *cache_ent;
+struct virtio_gpu_drv_cap_cache *search_ent;
void *resp_buf;
+*cache_p = NULL;
if (idx >= vgdev->num_capsets)
return -EINVAL;
@@ -716,9 +731,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
atomic_set(&cache_ent->is_valid, 0);
cache_ent->size = max_size;
spin_lock(&vgdev->display_info_lock);
-list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+/* Search while under lock in case it was added by another task. */
+list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
+if (search_ent->id == vgdev->capsets[idx].id &&
+search_ent->version == version) {
+*cache_p = search_ent;
+break;
+}
+}
+if (!*cache_p)
+list_add_tail(&cache_ent->head, &vgdev->cap_cache);
spin_unlock(&vgdev->display_info_lock);
+if (*cache_p) {
+/* Entry was found, so free everything that was just created. */
+kfree(resp_buf);
+kfree(cache_ent->caps_cache);
+kfree(cache_ent);
+return 0;
+}
cmd_p = virtio_gpu_alloc_cmd_resp
(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
sizeof(struct virtio_gpu_resp_capset) + max_size,
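
The capset change above follows a common alloc-outside-lock pattern:
allocate the candidate entry where sleeping is allowed, re-check the
list for a concurrent insertion while holding the lock, and free the
candidate if another task won the race. The same pattern in isolation,
as a generic hedged sketch (all identifiers illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cache_entry {
    struct list_head head;
    int id;
};

/* Return the entry for @id, inserting @cand if absent; free the loser. */
static struct cache_entry *get_or_add(struct list_head *cache,
                                      spinlock_t *lock, int id)
{
    struct cache_entry *cand, *ent, *found = NULL;

    cand = kzalloc(sizeof(*cand), GFP_KERNEL);    /* may sleep: unlocked */
    if (!cand)
        return NULL;
    cand->id = id;

    spin_lock(lock);
    list_for_each_entry(ent, cache, head) {
        if (ent->id == id) {
            found = ent;    /* another task beat us to it */
            break;
        }
    }
    if (!found)
        list_add_tail(&cand->head, cache);
    spin_unlock(lock);

    if (found) {
        kfree(cand);    /* lost the race; reuse the existing entry */
        return found;
    }
    return cand;
}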