Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into drm-next
Backmerge Linus master to get the connector locking revert.

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux: (645 commits)
  sysctl: fix proc_doulongvec_ms_jiffies_minmax()
  Revert "drm/probe-helpers: Drop locking from poll_enable"
  MAINTAINERS: add Dan Streetman to zbud maintainers
  MAINTAINERS: add Dan Streetman to zswap maintainers
  mm: do not export ioremap_page_range symbol for external module
  mn10300: fix build error of missing fpu_save()
  romfs: use different way to generate fsid for BLOCK or MTD
  frv: add missing atomic64 operations
  mm, page_alloc: fix premature OOM when racing with cpuset mems update
  mm, page_alloc: move cpuset seqcount checking to slowpath
  mm, page_alloc: fix fast-path race with cpuset update or removal
  mm, page_alloc: fix check for NULL preferred_zone
  kernel/panic.c: add missing \n
  fbdev: color map copying bounds checking
  frv: add atomic64_add_unless()
  mm/mempolicy.c: do not put mempolicy before using its nodemask
  radix-tree: fix private list warnings
  Documentation/filesystems/proc.txt: add VmPin
  mm, memcg: do not retry precharge charges
  proc: add a schedule point in proc_pid_readdir()
  ...
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 void adreno_flush(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	uint32_t wptr = get_wptr(gpu->rb);
+	uint32_t wptr;
+
+	/*
+	 * Mask wptr value that we calculate to fit in the HW range. This is
+	 * to account for the possibility that the last command fit exactly into
+	 * the ringbuffer and rb->next hasn't wrapped to zero yet
+	 */
+	wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
 
 	/* ensure writes to ringbuffer have hit system memory: */
 	mb();
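
The mask in the new wptr line relies on the ring size being a power of two: gpu->rb->size is in bytes, so size / 4 is the ring length in dwords, and ANDing with (length - 1) wraps an index that ran exactly to the end of the ring back to zero. A standalone sketch of the idiom (names are illustrative, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    #define RING_DWORDS 1024u   /* ring length in dwords; must be a power of two */

    /* Wrap a running write-pointer index into the ring, like
     * get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1) in the hunk above. */
    static uint32_t ring_wrap(uint32_t wptr)
    {
        return wptr & (RING_DWORDS - 1);
    }

    int main(void)
    {
        assert(ring_wrap(5) == 5);                /* in range: unchanged */
        assert(ring_wrap(RING_DWORDS) == 0);      /* exactly full: wraps to 0 */
        assert(ring_wrap(RING_DWORDS + 7) == 7);  /* past the end: wraps */
        return 0;
    }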
@@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
 	struct adreno_platform_config *config = pdev->dev.platform_data;
 	struct msm_gpu *gpu = &adreno_gpu->base;
-	struct msm_mmu *mmu;
 	int ret;
 
 	adreno_gpu->funcs = funcs;
@@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	mmu = gpu->aspace->mmu;
-	if (mmu) {
+	if (gpu->aspace && gpu->aspace->mmu) {
+		struct msm_mmu *mmu = gpu->aspace->mmu;
 		ret = mmu->funcs->attach(mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret)
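
The adreno_gpu_init() change checks both links of the optional chain before dereferencing, and narrows the helper pointer's scope to the guarded block so it cannot be used while NULL. A minimal sketch of that shape, with stand-in types rather than the msm ones:

    #include <stdio.h>

    /* Stand-ins for struct msm_gpu / msm_gem_address_space / msm_mmu. */
    struct mmu { const char *name; };
    struct aspace { struct mmu *mmu; };
    struct gpu { struct aspace *aspace; };

    static void attach_if_present(struct gpu *gpu)
    {
        if (gpu->aspace && gpu->aspace->mmu) {
            /* pointer only exists where it is known non-NULL */
            struct mmu *mmu = gpu->aspace->mmu;

            printf("attach %s\n", mmu->name);
        } else {
            printf("no MMU, skipping attach\n");
        }
    }

    int main(void)
    {
        struct mmu m = { "iommu" };
        struct aspace as = { &m };
        struct gpu with = { &as }, without = { 0 };

        attach_if_present(&with);
        attach_if_present(&without);
        return 0;
    }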
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-	int i;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct drm_plane *plane;
-	struct drm_plane_state *plane_state;
-
-	for_each_plane_in_state(state, plane, plane_state, i)
-		mdp5_plane_complete_commit(plane, plane_state);
 
 	if (mdp5_kms->smp)
 		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
 
 	/* assigned by crtc blender */
 	enum mdp_mixer_stage_id stage;
-
-	bool pending : 1;
 };
 #define to_mdp5_plane_state(x) \
 	container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-		struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
 
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
 	drm_printf(p, "\tzpos=%u\n", pstate->zpos);
 	drm_printf(p, "\talpha=%u\n", pstate->alpha);
 	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
-	drm_printf(p, "\tpending=%u\n", pstate->pending);
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 	if (mdp5_state && mdp5_state->base.fb)
 		drm_framebuffer_reference(mdp5_state->base.fb);
 
-	mdp5_state->pending = false;
-
 	return &mdp5_state->base;
 }
 
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 	DBG("%s: check (%d -> %d)", plane->name,
 			plane_enabled(old_state), plane_enabled(state));
 
-	/* We don't allow faster-than-vblank updates.. if we did add this
-	 * some day, we would need to disallow in cases where hwpipe
-	 * changes
-	 */
-	if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
-		return -EBUSY;
-
 	max_width = config->hw->lm.max_width << 16;
 	max_height = config->hw->lm.max_height << 16;
 
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
 		struct drm_plane_state *old_state)
 {
 	struct drm_plane_state *state = plane->state;
-	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
 
 	DBG("%s: update", plane->name);
 
-	mdp5_state->pending = true;
-
 	if (plane_enabled(state)) {
 		int ret;
 
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
 	return pstate->hwpipe->flush_mask;
 }
 
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-	struct drm_plane_state *state)
-{
-	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
-	pstate->pending = false;
-}
-
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 {
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+		if (!priv->aspace[id])
+			continue;
 		msm_gem_unmap_vma(priv->aspace[id],
 				&msm_obj->domain[id], msm_obj->sgt);
 	}
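
The two added lines skip address-space slots that were never populated, so a NULL aspace is not handed to msm_gem_unmap_vma(). A small self-contained sketch of the same skip-empty-slot loop, using stand-in data and the usual ARRAY_SIZE macro:

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        /* stand-in for priv->aspace[]: some slots were never set up */
        const char *aspace[4] = { "gpu", NULL, "display", NULL };

        for (size_t id = 0; id < ARRAY_SIZE(aspace); id++) {
            if (!aspace[id])
                continue;   /* nothing was mapped here; skip teardown */
            printf("unmap vma from %s address space\n", aspace[id]);
        }
        return 0;
    }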
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 			pagefault_disable();
 		}
 
-		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+				!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
 			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
 			ret = -EINVAL;
 			goto out_unlock;
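
Besides rejecting unknown bits, the reworked check now also requires at least one valid MSM_SUBMIT_BO_* flag to be set. A sketch of that two-sided validation; the flag values here are stand-ins, not the uapi constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in flag bits in place of MSM_SUBMIT_BO_READ/WRITE. */
    #define BO_READ   0x0001
    #define BO_WRITE  0x0002
    #define BO_FLAGS  (BO_READ | BO_WRITE)

    static bool bo_flags_valid(unsigned int flags)
    {
        /* no unknown bits set, and at least one known bit set */
        return !(flags & ~BO_FLAGS) && (flags & BO_FLAGS);
    }

    int main(void)
    {
        printf("%d\n", bo_flags_valid(BO_READ));           /* 1: ok */
        printf("%d\n", bo_flags_valid(0));                 /* 0: no op requested */
        printf("%d\n", bo_flags_valid(BO_READ | 0x100));   /* 0: unknown bit */
        return 0;
    }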
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 {
 	uint32_t i, last_offset = 0;
 	uint32_t *ptr;
-	int ret;
+	int ret = 0;
 
 	if (offset % 4) {
 		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 
 		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
 		if (ret)
-			return -EFAULT;
+			goto out;
 
 		if (submit_reloc.submit_offset % 4) {
 			DRM_ERROR("non-aligned reloc offset: %u\n",
 					submit_reloc.submit_offset);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		/* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 		if ((off >= (obj->base.size / 4)) ||
 				(off < last_offset)) {
 			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
 		if (ret)
-			return ret;
+			goto out;
 
 		if (valid)
 			continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 		last_offset = off;
 	}
 
+out:
 	msm_gem_put_vaddr_locked(&obj->base);
 
-	return 0;
+	return ret;
 }
 
 static void submit_cleanup(struct msm_gem_submit *submit)
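
Taken together, the submit_reloc() hunks replace every early return with a jump to a single out: label, so msm_gem_put_vaddr_locked() now runs on the error paths too, fixing a vaddr leak. (One visible side effect: on copy_from_user() failure the function now propagates the nonzero not-copied count rather than -EFAULT.) A compact userspace sketch of the single-exit cleanup pattern, with a stand-in resource and error values:

    #include <stdio.h>
    #include <stdlib.h>

    /* fail_step selects which (stand-in) check fails; 0 means success */
    static int process(int fail_step)
    {
        int ret = 0;
        char *vaddr = malloc(64);   /* stands in for acquiring the vaddr */

        if (!vaddr)
            return -1;  /* nothing held yet, a plain return is still fine */

        if (fail_step == 1) {
            ret = -22;  /* first failure mode, like the -EINVAL paths */
            goto out;
        }
        if (fail_step == 2) {
            ret = -14;  /* second failure mode */
            goto out;
        }
        /* ... the actual relocation work would happen here ... */
    out:
        free(vaddr);    /* stands in for the put: runs on every path */
        return ret;
    }

    int main(void)
    {
        printf("%d %d %d\n", process(0), process(1), process(2));
        return 0;
    }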
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 	struct msm_ringbuffer *ring;
 	int ret;
 
-	size = ALIGN(size, 4);   /* size should be dword aligned */
+	if (WARN_ON(!is_power_of_2(size)))
+		return ERR_PTR(-EINVAL);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring) {