Merge tag 'omap-for-v3.9/usb-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/soc
These changes contain the OMAP USB related platform data changes
that were dropped from linux-next because of merge conflicts, as
requested by me and Olof. The reason is that at this point we
really should be able to make the arch/arm related changes
separately from the driver changes, to avoid dependencies between
branches.

These patches were initially part of the USB related MFD patches.
Based on our comments, Roger Quadros quickly reworked them into a
shared branch between the ARM SoC tree and the MFD tree, followed
by separate patches for the OMAP platform data and the MFD driver.
Note that this branch will conflict with c1d1cd597f
("ARM: OMAP2+: omap_device: remove obsolete pm_lats and
early_device code"). Please see http://lkml.org/lkml/2013/2/11/16
for the merge resolution.
[arnd - resolved the merge conflict]
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
 					radeon_wait_for_vblank(rdev, i);
 					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 				}
 			} else {
 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
 				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
 					radeon_wait_for_vblank(rdev, i);
 					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 				}
 			}
 			/* wait for the next frame */

@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 		blackout &= ~BLACKOUT_MODE_MASK;
 		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
 	}
+	/* wait for the MC to settle */
+	udelay(100);
 }

 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)

@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
 			if (ASIC_IS_DCE6(rdev)) {
 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
 				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 			} else {
 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
 				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 			}
 			/* wait for the next frame */
 			frame_count = radeon_get_vblank_counter(rdev, i);

@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMA_TILING_CONFIG, gb_addr_config);

-	tmp = gb_addr_config & NUM_PIPES_MASK;
-	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
-					EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	if ((rdev->config.evergreen.max_backends == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 1) {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		} else {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	}
 	WREG32(GB_BACKEND_MAP, tmp);

 	WREG32(CGTS_SYS_TCC_DISABLE, 0);

@@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
 			      u32 disabled_rb_mask)
 {
 	u32 rendering_pipe_num, rb_num_width, req_rb_num;
-	u32 pipe_rb_ratio, pipe_rb_remain;
+	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
 	u32 data = 0, mask = 1 << (max_rb_num - 1);
 	unsigned i, j;

 	/* mask out the RBs that don't exist on that asic */
-	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+	/* make sure at least one RB is available */
+	if ((tmp & 0xff) != 0xff)
+		disabled_rb_mask = tmp;

 	rendering_pipe_num = 1 << tiling_pipe_num;
 	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);

@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {

@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {

@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.set_page = &si_vm_set_page,
 	},
 	.ring = {

@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 							  1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
 		}
+		/* RV100 board with external TDMS bit mis-set.
+		 * Actually uses internal TMDS, clear the bit.
+		 */
+		if (dev->pdev->device == 0x5159 &&
+		    dev->pdev->subsystem_vendor == 0x1014 &&
+		    dev->pdev->subsystem_device == 0x029A) {
+			tmp &= ~(1 << 4);
+		}
 		if ((tmp >> 4) & 0x1) {
 			devices |= ATOM_DEVICE_DFP2_SUPPORT;
 			radeon_add_legacy_encoder(dev,

@@ -1115,8 +1115,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 	}

 	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-	if (radeon_fb == NULL)
+	if (radeon_fb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
 		return ERR_PTR(-ENOMEM);
+	}

 	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
 	if (ret) {

@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
 	int r;

+	/* make sure we aren't trying to allocate more space than there is on the ring */
+	if (ndw > (ring->ring_size / 4))
+		return -ENOMEM;
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;

@@ -1,5 +1,6 @@
 cayman 0x9400
 0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
 0x000084FC CP_STRMOUT_CNTL
 0x000085F0 CP_COHER_CNTL
 0x000085F4 CP_COHER_SIZE

@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 			WREG32(R600_CITF_CNTL, blackout);
 		}
 	}
+	/* wait for the MC to settle */
+	udelay(100);
 }

 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)

@@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_driver *driver = bdev->driver;

-	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 	if (!fbo)
 		return -ENOMEM;

@@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);

-	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	spin_lock(&bdev->fence_lock);
+	if (bo->sync_obj)
+		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	else
+		fbo->sync_obj = NULL;
+	spin_unlock(&bdev->fence_lock);
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;

@@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	 */

 	set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

+	/* ttm_buffer_object_transfer accesses bo->sync_obj */
+	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 	spin_unlock(&bdev->fence_lock);
 	if (tmp_obj)
 		driver->sync_obj_unref(&tmp_obj);

-	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 	if (ret)
 		return ret;