Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"I Was Almost Tempted To Capitalise Every Word, but then I decided I
couldn't read it myself!
I've also got one pull request for the sti driver outstanding. It
relied on a commit in Greg's tree and I didn't find out in time; that
commit is in your tree now, so I might send that along once this is
merged.
I also had the accidental misfortune to have access to a Skylake on my
desk for a few days, and I've had to encourage Intel to try harder,
which seems to be happening now.
Here is the main drm-next pull request for 4.4.
Highlights:
New driver:
vc4 driver for the Raspberry Pi VPU.
(From Eric Anholt at Broadcom.)
Core:
Atomic fbdev support
Atomic helpers for runtime pm
dp/aux i2c STATUS_UPDATE handling
struct_mutex usage cleanups.
Generic OF probing support.
Documentation:
Kerneldoc for VGA switcheroo code.
Rename to gpu instead of drm to reflect scope.
i915:
Skylake GuC firmware fixes
HPD A support
VBT backlight fallbacks
Fastboot by default for some systems
FBC work
BXT/SKL workarounds
Skylake deeper sleep state fixes
amdgpu:
Enable GPU scheduler by default
New atombios opcodes
GPUVM debugging options
Stoney support.
Fencing cleanups.
radeon:
More efficient CS checking
nouveau:
gk20a instance memory handling improvements.
Improved PGOB detection and GK107 support
Kepler GDDR5 PLL stability improvements
G8x/GT2xx reclock improvements
New userspace API compatibility fixes.
virtio-gpu:
Add 3D support - qemu 2.5 has it merged for its gtk backend.
msm:
Initial msm8996 (snapdragon 820) support
exynos:
HDMI cleanups
Enable mixer driver by default
Add DECON-TV support
vmwgfx:
Move to using memremap + fixes.
rcar-du:
Add support for R8A7793/4 DU
armada:
Remove support for non-component mode
Improved plane handling
Power savings while in DPMS off.
tda998x:
Remove unused slave encoder support
Use more HDMI helpers
Fix EDID read handling
dwhdmi:
Interlace video mode support for ipu-v3/dw_hdmi
Hotplug state fixes
Audio driver integration
imx:
More color formats support.
tegra:
Minor fixes/improvements"
[ Merge fixup: remove unused variable 'dev' that had all uses removed in
  commit 4e270f0880 ("drm/gem: Drop struct_mutex requirement from
  drm_gem_mmap_obj") ]
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (764 commits)
drm/vmwgfx: Relax irq locking somewhat
drm/vmwgfx: Properly flush cursor updates and page-flips
drm/i915/skl: disable display side power well support for now
drm/i915: Extend DSL readout fix to BDW and SKL.
drm/i915: Do graphics device reset under forcewake
drm/i915: Skip fence installation for objects with rotated views (v4)
vga_switcheroo: Drop client power state VGA_SWITCHEROO_INIT
drm/amdgpu: group together common fence implementation
drm/amdgpu: remove AMDGPU_FENCE_OWNER_MOVE
drm/amdgpu: remove now unused fence functions
drm/amdgpu: fix fence fallback check
drm/amdgpu: fix stoping the scheduler timeout
drm/amdgpu: cleanup on error in amdgpu_cs_ioctl()
drm/i915: Fix locking around GuC firmware load
drm/amdgpu: update Fiji's Golden setting
drm/amdgpu: update Fiji's rev id
drm/amdgpu: extract common code in vi_common_early_init
drm/amd/scheduler: don't oops on failure to load
drm/amdgpu: don't oops on failure to load (v2)
drm/amdgpu: don't VT switch on suspend
...
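
[ Note: the vmwgfx changes in the diff below move the SVGA FIFO mapping
  from ioremap_cache()/ioread32()/iowrite32() over to memremap() with
  plain READ_ONCE()/WRITE_ONCE() accesses. A minimal sketch of that
  pattern follows; the demo_* names are hypothetical illustrations of
  the idiom, not the driver's actual code:

	#include <linux/io.h>

	struct demo_dev {
		u32 *regs;	/* memremap'd: a plain pointer, not __iomem */
	};

	static int demo_map(struct demo_dev *d, resource_size_t start,
			    size_t len)
	{
		/* Map the device memory write-back, like ordinary RAM. */
		d->regs = memremap(start, len, MEMREMAP_WB);
		return d->regs ? 0 : -ENOMEM;
	}

	/* Equivalent of ioread32()/iowrite32(), minus the byteswapping. */
	static u32 demo_read(struct demo_dev *d, unsigned int reg)
	{
		return READ_ONCE(d->regs[reg]);
	}

	static void demo_write(struct demo_dev *d, unsigned int reg, u32 val)
	{
		WRITE_ONCE(d->regs[reg], val);
	}

	static void demo_unmap(struct demo_dev *d)
	{
		memunmap(d->regs);	/* pairs with memremap(), not iounmap() */
	}
]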
@@ -146,73 +146,73 @@
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+		      DRM_MASTER | DRM_CONTROL_ALLOW),

 	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+		      DRM_MASTER | DRM_CONTROL_ALLOW),
 	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+		      DRM_MASTER | DRM_CONTROL_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+		      DRM_MASTER | DRM_CONTROL_ALLOW),

 	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
 		      vmw_fence_obj_signaled_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),

 	/* these allow direct access to the framebuffers mark as master only */
 	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
-		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+		      DRM_MASTER | DRM_AUTH),
 	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
 		      vmw_present_readback_ioctl,
-		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+		      DRM_MASTER | DRM_AUTH),
 	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 		      vmw_kms_update_layout_ioctl,
-		      DRM_MASTER | DRM_UNLOCKED),
+		      DRM_MASTER),
 	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 		      vmw_shader_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 		      vmw_shader_destroy_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 		      vmw_gb_surface_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 		      vmw_gb_surface_reference_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_dmabuf_synccpu_ioctl,
-		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+		      DRM_AUTH | DRM_RENDER_ALLOW),
 };

 static struct pci_device_id vmw_pci_id_list[] = {
@@ -643,7 +643,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
-	atomic_set(&dev_priv->fifo_queue_waiters, 0);
+	dev_priv->fifo_queue_waiters = 0;

 	dev_priv->used_memory_size = 0;

@@ -752,8 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;

-	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
-					    dev_priv->mmio_size);
+	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
+				       dev_priv->mmio_size, MEMREMAP_WB);

 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -907,7 +907,7 @@ out_no_irq:
 out_no_device:
 	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
-	iounmap(dev_priv->mmio_virt);
+	memunmap(dev_priv->mmio_virt);
 out_err3:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
@@ -958,7 +958,7 @@ static int vmw_driver_unload(struct drm_device *dev)
 	pci_release_regions(dev->pdev);

 	ttm_object_device_release(&dev_priv->tdev);
-	iounmap(dev_priv->mmio_virt);
+	memunmap(dev_priv->mmio_virt);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
@@ -1061,14 +1061,6 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
 	}
 	mutex_unlock(&dev->master_mutex);

-	/*
-	 * Taking the drm_global_mutex after the TTM lock might deadlock
-	 */
-	if (!(flags & DRM_UNLOCKED)) {
-		DRM_ERROR("Refusing locked ioctl access.\n");
-		return ERR_PTR(-EDEADLK);
-	}
-
 	/*
 	 * Take the TTM lock. Possibly sleep waiting for the authenticating
 	 * master to become master again, or for a SIGTERM if the
@@ -375,7 +375,7 @@ struct vmw_private {
 	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	u32 __iomem *mmio_virt;
+	u32 *mmio_virt;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
@@ -440,13 +440,12 @@ struct vmw_private {
 	spinlock_t waiter_lock;
 	int fence_queue_waiters; /* Protected by waiter_lock */
 	int goal_queue_waiters; /* Protected by waiter_lock */
-	int cmdbuf_waiters; /* Protected by irq_lock */
-	int error_waiters; /* Protected by irq_lock */
-	atomic_t fifo_queue_waiters;
+	int cmdbuf_waiters; /* Protected by waiter_lock */
+	int error_waiters; /* Protected by waiter_lock */
+	int fifo_queue_waiters; /* Protected by waiter_lock */
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
 	struct vmw_fence_manager *fman;
-	uint32_t irq_mask;
+	uint32_t irq_mask; /* Updates protected by waiter_lock */

 	/*
 	 * Device state
@@ -914,9 +913,9 @@ void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
-int vmw_enable_vblank(struct drm_device *dev, int crtc);
-void vmw_disable_vblank(struct drm_device *dev, int crtc);
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int vmw_kms_present(struct vmw_private *dev_priv,
 		    struct drm_file *file_priv,
 		    struct vmw_framebuffer *vfb,
@@ -1206,4 +1205,30 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
 {
 	atomic_dec(&dev_priv->num_fifo_resources);
 }
+
+/**
+ * vmw_mmio_read - Perform a MMIO read from volatile memory
+ *
+ * @addr: The address to read from
+ *
+ * This function is intended to be equivalent to ioread32() on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline u32 vmw_mmio_read(u32 *addr)
+{
+	return READ_ONCE(*addr);
+}
+
+/**
+ * vmw_mmio_write - Perform a MMIO write to volatile memory
+ *
+ * @addr: The address to write to
+ *
+ * This function is intended to be equivalent to iowrite32 on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline void vmw_mmio_write(u32 value, u32 *addr)
+{
+	WRITE_ONCE(*addr, value);
+}
 #endif
@@ -142,8 +142,8 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;

-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;

@@ -386,14 +386,14 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	u32 __iomem *fifo_mem;
+	u32 *fifo_mem;
 	struct vmw_fence_obj *fence;

 	if (likely(!fman->seqno_valid))
 		return false;

 	fifo_mem = fman->dev_priv->mmio_virt;
-	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
 	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 		return false;

@@ -401,8 +401,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 	list_for_each_entry(fence, &fman->fence_list, head) {
 		if (!list_empty(&fence->seq_passed_actions)) {
 			fman->seqno_valid = true;
-			iowrite32(fence->base.seqno,
-				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
+			vmw_mmio_write(fence->base.seqno,
+				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
 			break;
 		}
 	}
@@ -430,18 +430,18 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	u32 __iomem *fifo_mem;
+	u32 *fifo_mem;

 	if (fence_is_signaled_locked(&fence->base))
 		return false;

 	fifo_mem = fman->dev_priv->mmio_virt;
-	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
 	if (likely(fman->seqno_valid &&
 		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 		return false;

-	iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
 	fman->seqno_valid = true;

 	return true;
@@ -453,9 +453,9 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+	u32 *fifo_mem = fman->dev_priv->mmio_virt;

-	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -477,7 +477,7 @@ rerun:

 	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 	if (unlikely(needs_rerun)) {
-		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 		if (new_seqno != seqno) {
 			seqno = new_seqno;
 			goto rerun;
@@ -36,7 +36,7 @@ struct vmw_temp_set_context {

 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

@@ -60,15 +60,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;

-	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;

-	hwversion = ioread32(fifo_mem +
-			     ((fifo->capabilities &
-			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-			      SVGA_FIFO_3D_HWVERSION_REVISED :
-			      SVGA_FIFO_3D_HWVERSION));
+	hwversion = vmw_mmio_read(fifo_mem +
+				  ((fifo->capabilities &
+				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				   SVGA_FIFO_3D_HWVERSION_REVISED :
+				   SVGA_FIFO_3D_HWVERSION));

 	if (hwversion == 0)
 		return false;
@@ -85,13 +85,13 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)

 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;

 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;

-	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
 	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
 		return true;

@@ -100,7 +100,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)

 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;

@@ -137,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	if (min < PAGE_SIZE)
 		min = PAGE_SIZE;

-	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
-	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
+	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
 	wmb();
-	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
-	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
-	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
+	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
 	mb();

 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

-	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

 	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
 		 (unsigned int) max,
@@ -157,7 +157,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) fifo->capabilities);

 	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
-	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
 	vmw_marker_queue_init(&fifo->marker_queue);

 	return 0;
@@ -165,31 +165,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	static DEFINE_SPINLOCK(ping_lock);
-	unsigned long irq_flags;
+	u32 *fifo_mem = dev_priv->mmio_virt;

-	/*
-	 * The ping_lock is needed because we don't have an atomic
-	 * test-and-set of the SVGA_FIFO_BUSY register.
-	 */
-	spin_lock_irqsave(&ping_lock, irq_flags);
-	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
-		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+	preempt_disable();
+	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
-	}
-	spin_unlock_irqrestore(&ping_lock, irq_flags);
+	preempt_enable();
 }

 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;

 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		;

-	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
 		  dev_priv->config_done_state);
@@ -213,11 +205,11 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
-	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

 	return ((max - next_cmd) + (stop - min) <= bytes);
 }
@@ -260,7 +252,6 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 			 unsigned long timeout)
 {
 	long ret = 1L;
-	unsigned long irq_flags;

 	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
 		return 0;
@@ -270,16 +261,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		return vmw_fifo_wait_noirq(dev_priv, bytes,
 					   interruptible, timeout);

-	spin_lock(&dev_priv->waiter_lock);
-	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+			       &dev_priv->fifo_queue_waiters);

 	if (interruptible)
 		ret = wait_event_interruptible_timeout
@@ -295,14 +278,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 	else if (likely(ret > 0))
 		ret = 0;

-	spin_lock(&dev_priv->waiter_lock);
-	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+				  &dev_priv->fifo_queue_waiters);

 	return ret;
 }
@@ -321,7 +298,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -329,9 +306,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 	int ret;

 	mutex_lock(&fifo_state->fifo_mutex);
-	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

 	if (unlikely(bytes >= (max - min)))
 		goto out_err;
@@ -342,7 +319,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 	fifo_state->reserved_size = bytes;

 	while (1) {
-		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
 		bool need_bounce = false;
 		bool reserve_in_place = false;

@@ -376,8 +353,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				fifo_state->using_bounce_buffer = false;

 				if (reserveable)
-					iowrite32(bytes, fifo_mem +
-						  SVGA_FIFO_RESERVED);
+					vmw_mmio_write(bytes, fifo_mem +
+						       SVGA_FIFO_RESERVED);
 				return (void __force *) (fifo_mem +
 							 (next_cmd >> 2));
 			} else {
@@ -427,7 +404,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
 }

 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      u32 __iomem *fifo_mem,
+			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -439,17 +416,16 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 	if (bytes < chunk_size)
 		chunk_size = bytes;

-	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
-	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
 	rest = bytes - chunk_size;
 	if (rest)
-		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
-			    rest);
+		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
 }

 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       u32 __iomem *fifo_mem,
+			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -457,12 +433,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 		fifo_state->dynamic_buffer : fifo_state->static_buffer;

 	while (bytes > 0) {
-		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
 		next_cmd += sizeof(uint32_t);
 		if (unlikely(next_cmd == max))
 			next_cmd = min;
 		mb();
-		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
 		mb();
 		bytes -= sizeof(uint32_t);
 	}
@@ -471,10 +447,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
-	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

 	if (fifo_state->dx)
@@ -507,11 +483,11 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 		if (next_cmd >= max)
 			next_cmd -= max - min;
 		mb();
-		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
 	}

 	if (reserveable)
-		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
 	up_write(&fifo_state->rwsem);
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
@@ -64,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
-		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -73,11 +73,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		}

 		param->value =
-			ioread32(fifo_mem +
-				 ((fifo->capabilities &
-				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-				  SVGA_FIFO_3D_HWVERSION_REVISED :
-				  SVGA_FIFO_3D_HWVERSION));
+			vmw_mmio_read(fifo_mem +
+				      ((fifo->capabilities &
+					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				       SVGA_FIFO_3D_HWVERSION_REVISED :
+				       SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -122,6 +122,22 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

+static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
+{
+	/* If the header is updated, update the format test as well! */
+	BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);
+
+	if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
+	    cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
+		fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
+			       SVGADX_DXFMT_MULTISAMPLE_4 |
+			       SVGADX_DXFMT_MULTISAMPLE_8);
+	else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+		return 0;
+
+	return fmt_value;
+}
+
 static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
 			       size_t size)
 {
@@ -147,7 +163,8 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
 	for (i = 0; i < max_size; ++i) {
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 		compat_cap->pairs[i][0] = i;
-		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		compat_cap->pairs[i][1] = vmw_mask_multisample
+			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
 	}
 	spin_unlock(&dev_priv->cap_lock);

@@ -162,7 +179,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_get_3d_cap_arg *) data;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t size;
-	u32 __iomem *fifo_mem;
+	u32 *fifo_mem;
 	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
 	void *bounce;
 	int ret;
@@ -202,7 +219,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		spin_lock(&dev_priv->cap_lock);
 		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
-			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+			*bounce32++ = vmw_mask_multisample
+				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
 		}
 		spin_unlock(&dev_priv->cap_lock);
 	} else if (gb_objects) {
@@ -211,7 +229,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 			goto out_err;
 	} else {
 		fifo_mem = dev_priv->mmio_virt;
-		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}

 	ret = copy_to_user(buffer, bounce, size);
@@ -36,15 +36,13 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t status, masked_status;

-	spin_lock(&dev_priv->irq_lock);
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-	masked_status = status & dev_priv->irq_mask;
-	spin_unlock(&dev_priv->irq_lock);
+	masked_status = status & READ_ONCE(dev_priv->irq_mask);

 	if (likely(status))
 		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

-	if (!masked_status)
+	if (!status)
 		return IRQ_NONE;

 	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
@@ -72,8 +70,8 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 void vmw_update_seqno(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

 	if (dev_priv->last_read_seqno != seqno) {
 		dev_priv->last_read_seqno = seqno;
@@ -178,8 +176,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
 	if (ret == 0 && fifo_idle) {
-		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+		u32 *fifo_mem = dev_priv->mmio_virt;
+
+		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
 	}
 	wake_up_all(&dev_priv->fence_queue);
 out_err:
@@ -189,65 +188,51 @@ out_err:
 	return ret;
 }

+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+			    u32 flag, int *waiter_count)
+{
+	spin_lock_bh(&dev_priv->waiter_lock);
+	if ((*waiter_count)++ == 0) {
+		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_bh(&dev_priv->waiter_lock);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+			       u32 flag, int *waiter_count)
+{
+	spin_lock_bh(&dev_priv->waiter_lock);
+	if (--(*waiter_count) == 0) {
+		dev_priv->irq_mask &= ~flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_bh(&dev_priv->waiter_lock);
+}
+
 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (dev_priv->fence_queue_waiters++ == 0) {
-		unsigned long irq_flags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_ANY_FENCE,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+			       &dev_priv->fence_queue_waiters);
 }

 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (--dev_priv->fence_queue_waiters == 0) {
-		unsigned long irq_flags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+				  &dev_priv->fence_queue_waiters);
 }


 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (dev_priv->goal_queue_waiters++ == 0) {
-		unsigned long irq_flags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_FENCE_GOAL,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+			       &dev_priv->goal_queue_waiters);
 }

 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (--dev_priv->goal_queue_waiters == 0) {
-		unsigned long irq_flags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
				  &dev_priv->goal_queue_waiters);
 }

 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -304,7 +289,6 @@ void vmw_irq_preinstall(struct drm_device *dev)
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
 		return;

-	spin_lock_init(&dev_priv->irq_lock);
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
@@ -327,30 +311,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
-
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
-			    u32 flag, int *waiter_count)
-{
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-	if ((*waiter_count)++ == 0) {
-		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= flag;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-}
-
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
-			       u32 flag, int *waiter_count)
-{
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-	if (--(*waiter_count) == 0) {
-		dev_priv->irq_mask &= ~flag;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-}
@@ -78,7 +78,7 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	cmd->cursor.hotspotX = hotspotX;
 	cmd->cursor.hotspotY = hotspotY;

-	vmw_fifo_commit(dev_priv, cmd_size);
+	vmw_fifo_commit_flush(dev_priv, cmd_size);

 	return 0;
 }
@@ -123,14 +123,14 @@ err_unreserve:
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
 {
-	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t count;

-	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
-	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
-	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
-	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
-	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
+	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 }

 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -1155,7 +1155,8 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
+			       SVGA_FIFO_PITCHLOCK);
 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
 	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
@@ -1181,8 +1182,8 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
 		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
-						   SVGA_FIFO_PITCHLOCK);
+		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
+							SVGA_FIFO_PITCHLOCK);

 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
 		return 0;
@@ -1230,8 +1231,8 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		iowrite32(vmw_priv->vga_pitchlock,
-			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+		vmw_mmio_write(vmw_priv->vga_pitchlock,
+			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
 		return 0;
@@ -1263,7 +1264,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 /**
  * Function called by DRM code called with vbl_lock held.
  */
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
 	return 0;
 }
@@ -1271,7 +1272,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
 /**
  * Function called by DRM code called with vbl_lock held.
  */
-int vmw_enable_vblank(struct drm_device *dev, int crtc)
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	return -ENOSYS;
 }
@@ -1279,7 +1280,7 @@ int vmw_enable_vblank(struct drm_device *dev, int crtc)
 /**
  * Function called by DRM code called with vbl_lock held.
  */
-void vmw_disable_vblank(struct drm_device *dev, int crtc)
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 }

@@ -717,6 +717,8 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
					   &event->event.tv_usec,
					   true);
 		vmw_fence_obj_unreference(&fence);
+	} else {
+		vmw_fifo_flush(dev_priv, false);
 	}

 	return ret;
@@ -1291,6 +1291,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	uint32_t size;
 	uint32_t backup_handle;

+	if (req->multisample_count != 0)
+		return -EINVAL;
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
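
[ Note: one detail worth flagging in the FIFO changes above is
  vmw_fifo_ping_host(), which drops its private spinlock in favour of an
  atomic test-and-set; that is only possible because the FIFO is now
  memremap'd, so cmpxchg() can operate on it as ordinary memory. A
  simplified sketch of the idiom - ping_host() and doorbell() are
  hypothetical names, not the driver's code:

	#include <linux/atomic.h>
	#include <linux/preempt.h>

	static void ping_host(u32 *busy_word, void (*doorbell)(void))
	{
		/*
		 * Only the caller that flips the word 0 -> 1 rings the
		 * doorbell; concurrent callers see 1 and skip it. Disabling
		 * preemption keeps the flip-to-doorbell window short.
		 */
		preempt_disable();
		if (cmpxchg(busy_word, 0, 1) == 0)
			doorbell();
		preempt_enable();
	}
]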