Merge tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "The biggest thing in this is the AMD Navi GPU support, this again
  contains a bunch of header files that are large. These are the new
  AMD RX5700 GPUs that just recently became available.

  New drivers:
   - ST-Ericsson MCDE driver
   - Ingenic JZ47xx SoC

  UAPI change:
   - HDR source metadata property

  Core:
   - HDR infoframes and EDID parsing
   - drm hdmi infoframe unpacking
   - remove prime sg_table caching into dma-buf
   - New gem vram helpers to reduce driver code
   - Lots of drmP.h removal
   - reservation fencing fix
   - documentation updates
   - drm_fb_helper_connector removed
   - mode name command handler rewrite

  fbcon:
   - Remove the fbcon notifiers

  ttm:
   - forward progress fixes

  dma-buf:
   - make mmap call optional
   - debugfs refcount fixes
   - dma-fence free with pending signals fix
   - each dma-buf gets an inode

  Panels:
   - Lots of additional panel bindings

  amdgpu:
   - initial navi10 support
   - avoid hw reset
   - HDR metadata support
   - new thermal sensors for vega asics
   - RAS fixes
   - use HMM rather than MMU notifier
   - xgmi topology via kfd
   - SR-IOV fixes
   - driver reload fixes
   - DC use a core bpc attribute
   - Aux fixes for DC
   - Bandwidth calc updates for DC
   - Clock handling refactor
   - kfd VEGAM support

  vmwgfx:
   - Coherent memory support changes

  i915:
   - HDR Support
   - HDMI i2c link
   - Icelake multi-segmented gamma support
   - GuC firmware update
   - Mule Creek Canyon PCH support for EHL
   - EHL platform updates
   - move i915.alpha_support to i915.force_probe
   - runtime PM refactoring
   - VBT parsing refactoring
   - DSI fixes
   - struct mutex dependency reduction
   - GEM code reorg

  mali-dp:
   - Komeda driver features

  msm:
   - dsi vs EPROBE_DEFER fixes
   - msm8998 snapdragon 835 support
   - a540 gpu support
   - mdp5 and dpu interconnect support

  exynos:
   - drmP.h removal

  tegra:
   - misc fixes

  tda998x:
   - audio support improvements
   - pixel repeated mode support
   - quantisation range handling corrections
   - HDMI vendor info fix

  armada:
   - interlace support fix
   - overlay/video plane register handling refactor
   - add gamma support

  rockchip:
   - RK3328 support

  panfrost:
   - expose perf counters via hidden ioctls

  vkms:
   - enumerate CRC sources list

  ast:
   - rework BO handling

  mgag200:
   - rework BO handling

  dw-hdmi:
   - suspend/resume support

  rcar-du:
   - R8A774A1 SoC support
   - LVDS dual-link mode support
   - Additional formats
   - Misc fixes

  omapdrm:
   - DSI command mode display support

  stm:
   - fb modifier support
   - runtime PM support

  sun4i:
   - use vmap ops

  vc4:
   - binner bo binding rework

  v3d:
   - compute shader support
   - resync/sync fixes
   - job management refactoring

  lima:
   - NULL pointer in irq handler fix
   - scheduler default timeout

  virtio:
   - fence seqno support
   - trace events

  bochs:
   - misc fixes

  tc358767:
   - IRQ/HPD handling

  sii902x:
   - HDMI audio support

  atmel-hlcdc:
   - misc fixes

  meson:
   - zpos support"

* tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm: (1815 commits)
  Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
  Revert "mm: adjust apply_to_pfn_range interface for dropped token."
  mm: adjust apply_to_pfn_range interface for dropped token.
  drm/amdgpu/navi10: add uclk activity sensor
  drm/amdgpu: properly guard the generic discovery code
  drm/amdgpu: add missing documentation on new module parameters
  drm/amdgpu: don't invalidate caches in RELEASE_MEM, only do the writeback
  drm/amd/display: avoid 64-bit division
  drm/amdgpu/psp11: simplify the ucode register logic
  drm/amdgpu: properly guard DC support in navi code
  drm/amd/powerplay: vega20: fix uninitialized variable use
  drm/amd/display: dcn20: include linux/delay.h
  amdgpu: make pmu support optional
  drm/amd/powerplay: Zero initialize current_rpm in vega20_get_fan_speed_percent
  drm/amd/powerplay: Zero initialize freq in smu_v11_0_get_current_clk_freq
  drm/amd/powerplay: Use memset to initialize metrics structs
  drm/amdgpu/mes10.1: Fix header guard
  drm/amd/powerplay: add temperature sensor support for navi10
  drm/amdgpu: fix scheduler timeout calc
  drm/amdgpu: Prepare for hmm_range_register API change (v2)
  ...
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -796,13 +796,36 @@ vc4_prime_import_sg_table(struct drm_device *dev,
 	return obj;
 }
 
+static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
+{
+	int ret;
+
+	if (!vc4->v3d)
+		return -ENODEV;
+
+	if (vc4file->bin_bo_used)
+		return 0;
+
+	ret = vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
 	struct drm_vc4_create_bo *args = data;
+	struct vc4_file *vc4file = file_priv->driver_priv;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_bo *bo = NULL;
 	int ret;
 
+	ret = vc4_grab_bin_bo(vc4, vc4file);
+	if (ret)
+		return ret;
+
 	/*
 	 * We can't allocate from the BO cache, because the BOs don't
 	 * get zeroed, and that might leak data between users.
@@ -843,6 +866,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv)
 {
 	struct drm_vc4_create_shader_bo *args = data;
+	struct vc4_file *vc4file = file_priv->driver_priv;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_bo *bo = NULL;
 	int ret;
 
@@ -862,6 +887,10 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	ret = vc4_grab_bin_bo(vc4, vc4file);
+	if (ret)
+		return ret;
+
 	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
@@ -891,7 +920,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 	 */
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 
-fail:
+fail:
 	drm_gem_object_put_unlocked(&bo->base.base);
 
 	return ret;
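Note: with this change the binner BO is no longer allocated when the driver binds; it is created lazily the first time a client actually exercises the V3D, e.g. on its first BO creation. A rough userspace-side sketch of what now triggers that first allocation (the ioctl and struct come from the existing vc4 UAPI; header path, file-descriptor handling and error handling are illustrative only):

#include <fcntl.h>
#include <string.h>
#include <xf86drm.h>
#include "vc4_drm.h"	/* uapi header; install path depends on the setup */

/* Opening the DRM node alone no longer allocates the binner BO; the
 * first DRM_IOCTL_VC4_CREATE_BO (or job submission) does. */
int create_first_bo(void)
{
	struct drm_vc4_create_bo create;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&create, 0, sizeof(create));
	create.size = 4096;

	if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create))
		return -1;

	return create.handle;	/* GEM handle of the new BO */
}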
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -26,13 +26,9 @@ vc4_debugfs_init(struct drm_minor *minor)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
 	struct vc4_debugfs_info_entry *entry;
-	struct dentry *dentry;
 
-	dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
-				     minor->debugfs_root,
-				     &vc4->load_tracker_enabled);
-	if (!dentry)
-		return -ENOMEM;
+	debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+			    minor->debugfs_root, &vc4->load_tracker_enabled);
 
 	list_for_each_entry(entry, &vc4->debugfs_list, link) {
 		int ret = drm_debugfs_create_files(&entry->info, 1,
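The debugfs hunk follows the kernel policy that debugfs creation failures are not worth handling: debugfs_create_bool() and friends can simply be called and their result ignored. A minimal sketch of the resulting pattern (the knob names and variables below are illustrative, not from the patch):

#include <linux/debugfs.h>

static bool my_feature_enabled;
static u32 my_threshold = 16;

/* Create knobs under an existing debugfs directory.  No error
 * checking: if debugfs is disabled or creation fails, the driver
 * keeps working, just without the knobs. */
static void my_driver_debugfs_init(struct dentry *root)
{
	debugfs_create_bool("feature_enabled", 0644, root,
			    &my_feature_enabled);
	debugfs_create_u32("threshold", 0644, root, &my_threshold);
}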
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -125,8 +125,12 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file)
 
 static void vc4_close(struct drm_device *dev, struct drm_file *file)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_file *vc4file = file->driver_priv;
 
+	if (vc4file->bin_bo_used)
+		vc4_v3d_bin_bo_put(vc4);
+
 	vc4_perfmon_close_file(vc4file);
 	kfree(vc4file);
 }
@@ -271,6 +275,8 @@ static int vc4_drm_bind(struct device *dev)
 	drm->dev_private = vc4;
 	INIT_LIST_HEAD(&vc4->debugfs_list);
 
+	mutex_init(&vc4->bin_bo_lock);
+
 	ret = vc4_bo_cache_init(drm);
 	if (ret)
 		goto dev_put;
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -213,6 +213,11 @@ struct vc4_dev {
 	 * the minor is available (after drm_dev_register()).
 	 */
 	struct list_head debugfs_list;
+
+	/* Mutex for binner bo allocation. */
+	struct mutex bin_bo_lock;
+	/* Reference count for our binner bo. */
+	struct kref bin_bo_kref;
 };
 
 static inline struct vc4_dev *
@@ -581,6 +586,11 @@ struct vc4_exec_info {
 	 * NULL otherwise.
 	 */
 	struct vc4_perfmon *perfmon;
+
+	/* Whether the exec has taken a reference to the binner BO, which should
+	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
+	 */
+	bool bin_bo_used;
 };
 
 /* Per-open file private data. Any driver-specific resource that has to be
@@ -591,6 +601,8 @@ struct vc4_file {
 		struct idr idr;
 		struct mutex lock;
 	} perfmon;
+
+	bool bin_bo_used;
 };
 
 static inline struct vc4_exec_info *
@@ -830,6 +842,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
 extern struct platform_driver vc4_v3d_driver;
 extern const struct of_device_id vc4_v3d_dt_match[];
 int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
+int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
+void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
 int vc4_v3d_pm_get(struct vc4_dev *vc4);
 void vc4_v3d_pm_put(struct vc4_dev *vc4);
 
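The new bin_bo_lock/bin_bo_kref pair in struct vc4_dev implements a common kernel idiom: a shared resource that is created lazily by its first user, reference-counted by every later user, and torn down when the last reference is dropped. Stripped of the vc4 specifics, the idiom looks roughly like this (illustrative sketch, not code from the patch; names and the dummy payload are invented):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct lazy_res {
	struct mutex lock;	/* serialises creation and teardown */
	struct kref kref;	/* one count per active user */
	void *payload;		/* whatever is being shared */
};

static void lazy_res_release(struct kref *kref)
{
	struct lazy_res *r = container_of(kref, struct lazy_res, kref);

	/* Called with r->lock held by the final kref_put() caller. */
	kfree(r->payload);
	r->payload = NULL;
}

static int lazy_res_get(struct lazy_res *r)
{
	int ret = 0;

	mutex_lock(&r->lock);
	if (r->payload) {
		kref_get(&r->kref);
	} else {
		r->payload = kzalloc(4096, GFP_KERNEL);
		if (!r->payload)
			ret = -ENOMEM;
		else
			kref_init(&r->kref);
	}
	mutex_unlock(&r->lock);

	return ret;
}

static void lazy_res_put(struct lazy_res *r)
{
	mutex_lock(&r->lock);
	kref_put(&r->kref, lazy_res_release);
	mutex_unlock(&r->lock);
}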
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -820,6 +820,7 @@ static int
 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 {
 	struct drm_vc4_submit_cl *args = exec->args;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	void *temp = NULL;
 	void *bin;
 	int ret = 0;
@@ -918,6 +919,12 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	if (ret)
 		goto fail;
 
+	if (exec->found_tile_binning_mode_config_packet) {
+		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
+		if (ret)
+			goto fail;
+	}
+
 	/* Block waiting on any previous rendering into the CS's VBO,
 	 * IB, or textures, so that pixels are actually written by the
 	 * time we try to read them.
@@ -966,6 +973,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 	vc4->bin_alloc_used &= ~exec->bin_slots;
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 
+	/* Release the reference on the binner BO if needed. */
+	if (exec->bin_bo_used)
+		vc4_v3d_bin_bo_put(vc4);
+
 	/* Release the reference we had on the perf monitor. */
 	vc4_perfmon_put(exec->perfmon);
 
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -247,11 +247,17 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
+static void vc4_hdmi_connector_reset(struct drm_connector *connector)
+{
+	drm_atomic_helper_connector_reset(connector);
+	drm_atomic_helper_connector_tv_reset(connector);
+}
+
 static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
 	.detect = vc4_hdmi_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = vc4_hdmi_connector_destroy,
-	.reset = drm_atomic_helper_connector_reset,
+	.reset = vc4_hdmi_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -59,15 +59,22 @@ vc4_overflow_mem_work(struct work_struct *work)
 {
 	struct vc4_dev *vc4 =
 		container_of(work, struct vc4_dev, overflow_mem_work);
-	struct vc4_bo *bo = vc4->bin_bo;
+	struct vc4_bo *bo;
 	int bin_bo_slot;
 	struct vc4_exec_info *exec;
 	unsigned long irqflags;
 
+	mutex_lock(&vc4->bin_bo_lock);
+
+	if (!vc4->bin_bo)
+		goto complete;
+
+	bo = vc4->bin_bo;
+
 	bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
 	if (bin_bo_slot < 0) {
 		DRM_ERROR("Couldn't allocate binner overflow mem\n");
-		return;
+		goto complete;
 	}
 
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
@@ -98,6 +105,9 @@ vc4_overflow_mem_work(struct work_struct *work)
 	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
 	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+complete:
+	mutex_unlock(&vc4->bin_bo_lock);
 }
 
 static void
@@ -249,8 +259,10 @@ vc4_irq_postinstall(struct drm_device *dev)
 	if (!vc4->v3d)
 		return 0;
 
-	/* Enable both the render done and out of memory interrupts. */
-	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+	/* Enable the render done interrupts. The out-of-memory interrupt is
+	 * enabled as soon as we have a binner BO allocated.
+	 */
+	V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE);
 
 	return 0;
 }
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -307,10 +307,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 	struct drm_framebuffer *fb = state->fb;
 	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
 	u32 subpixel_src_mask = (1 << 16) - 1;
-	u32 format = fb->format->format;
 	int num_planes = fb->format->num_planes;
 	struct drm_crtc_state *crtc_state;
-	u32 h_subsample, v_subsample;
+	u32 h_subsample = fb->format->hsub;
+	u32 v_subsample = fb->format->vsub;
 	int i, ret;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
@@ -325,9 +325,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 	if (ret)
 		return ret;
 
-	h_subsample = drm_format_horz_chroma_subsampling(format);
-	v_subsample = drm_format_vert_chroma_subsampling(format);
-
 	for (i = 0; i < num_planes; i++)
 		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
 
@@ -589,8 +586,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	u32 ctl0_offset = vc4_state->dlist_count;
 	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
 	u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
-	int num_planes = drm_format_num_planes(format->drm);
-	u32 h_subsample, v_subsample;
+	int num_planes = fb->format->num_planes;
+	u32 h_subsample = fb->format->hsub;
+	u32 v_subsample = fb->format->vsub;
 	bool mix_plane_alpha;
 	bool covers_screen;
 	u32 scl0, scl1, pitch0;
@@ -620,9 +618,6 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		scl1 = vc4_get_scl_field(state, 0);
 	}
 
-	h_subsample = drm_format_horz_chroma_subsampling(format->drm);
-	v_subsample = drm_format_vert_chroma_subsampling(format->drm);
-
 	rotation = drm_rotation_simplify(state->rotation,
 					 DRM_MODE_ROTATE_0 |
 					 DRM_MODE_REFLECT_X |
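These plane hunks replace the removed drm_format_horz/vert_chroma_subsampling() helpers with the hsub/vsub fields that struct drm_format_info already carries. As a small illustration of what those fields mean (a hypothetical helper, not part of the patch), the dimensions of a chroma plane follow directly from them:

#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <linux/kernel.h>

/* Width/height of a subsampled (chroma) plane of @fb. */
static void chroma_plane_size(const struct drm_framebuffer *fb,
			      unsigned int *w, unsigned int *h)
{
	*w = DIV_ROUND_UP(fb->width, fb->format->hsub);
	*h = DIV_ROUND_UP(fb->height, fb->format->vsub);
}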
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -221,17 +221,18 @@ static const u32 txp_fmts[] = {
 };
 
 static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
-					  struct drm_connector_state *conn_state)
+					  struct drm_atomic_state *state)
 {
+	struct drm_connector_state *conn_state;
 	struct drm_crtc_state *crtc_state;
 	struct drm_framebuffer *fb;
 	int i;
 
+	conn_state = drm_atomic_get_new_connector_state(state, conn);
 	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
 		return 0;
 
-	crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
-						   conn_state->crtc);
+	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
 
 	fb = conn_state->writeback_job->fb;
 	if (fb->width != crtc_state->mode.hdisplay ||
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -202,7 +202,7 @@ try_again:
 }
 
 /**
- * vc4_allocate_bin_bo() - allocates the memory that will be used for
+ * bin_bo_alloc() - allocates the memory that will be used for
  * tile binning.
  *
  * The binner has a limitation that the addresses in the tile state
@@ -223,14 +223,16 @@ try_again:
  * overall CMA pool before they make scenes complicated enough to run
  * out of bin space.
  */
-static int vc4_allocate_bin_bo(struct drm_device *drm)
+static int bin_bo_alloc(struct vc4_dev *vc4)
 {
-	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_v3d *v3d = vc4->v3d;
 	uint32_t size = 16 * 1024 * 1024;
 	int ret = 0;
 	struct list_head list;
 
+	if (!v3d)
+		return -ENODEV;
+
 	/* We may need to try allocating more than once to get a BO
 	 * that doesn't cross 256MB. Track the ones we've allocated
 	 * that failed so far, so that we can free them when we've got
@@ -240,7 +242,7 @@ static int vc4_allocate_bin_bo(struct drm_device *drm)
 	INIT_LIST_HEAD(&list);
 
 	while (true) {
-		struct vc4_bo *bo = vc4_bo_create(drm, size, true,
+		struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true,
 						  VC4_BO_TYPE_BIN);
 
 		if (IS_ERR(bo)) {
@@ -281,6 +283,14 @@ static int vc4_allocate_bin_bo(struct drm_device *drm)
 		WARN_ON_ONCE(sizeof(vc4->bin_alloc_used) * 8 !=
 			     bo->base.base.size / vc4->bin_alloc_size);
 
+		kref_init(&vc4->bin_bo_kref);
+
+		/* Enable the out-of-memory interrupt to set our
+		 * newly-allocated binner BO, potentially from an
+		 * already-pending-but-masked interrupt.
+		 */
+		V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+
 		break;
 	}
 
@@ -300,6 +310,47 @@ static int vc4_allocate_bin_bo(struct drm_device *drm)
 	return ret;
 }
 
+int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
+{
+	int ret = 0;
+
+	mutex_lock(&vc4->bin_bo_lock);
+
+	if (used && *used)
+		goto complete;
+
+	if (vc4->bin_bo)
+		kref_get(&vc4->bin_bo_kref);
+	else
+		ret = bin_bo_alloc(vc4);
+
+	if (ret == 0 && used)
+		*used = true;
+
+complete:
+	mutex_unlock(&vc4->bin_bo_lock);
+
+	return ret;
+}
+
+static void bin_bo_release(struct kref *ref)
+{
+	struct vc4_dev *vc4 = container_of(ref, struct vc4_dev, bin_bo_kref);
+
+	if (WARN_ON_ONCE(!vc4->bin_bo))
+		return;
+
+	drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
+	vc4->bin_bo = NULL;
+}
+
+void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
+{
+	mutex_lock(&vc4->bin_bo_lock);
+	kref_put(&vc4->bin_bo_kref, bin_bo_release);
+	mutex_unlock(&vc4->bin_bo_lock);
+}
+
 #ifdef CONFIG_PM
 static int vc4_v3d_runtime_suspend(struct device *dev)
 {
@@ -308,9 +359,6 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
 
 	vc4_irq_uninstall(vc4->dev);
 
-	drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
-	vc4->bin_bo = NULL;
-
 	clk_disable_unprepare(v3d->clk);
 
 	return 0;
@@ -322,10 +370,6 @@ static int vc4_v3d_runtime_resume(struct device *dev)
 	struct vc4_dev *vc4 = v3d->vc4;
 	int ret;
 
-	ret = vc4_allocate_bin_bo(vc4->dev);
-	if (ret)
-		return ret;
-
 	ret = clk_prepare_enable(v3d->clk);
 	if (ret != 0)
 		return ret;
@@ -392,12 +436,6 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 	if (ret != 0)
 		return ret;
 
-	ret = vc4_allocate_bin_bo(drm);
-	if (ret) {
-		clk_disable_unprepare(v3d->clk);
-		return ret;
-	}
-
 	/* Reset the binner overflow address/size at setup, to be sure
 	 * we don't reuse an old one.
 	 */
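Taken together, vc4_v3d_bin_bo_get() and vc4_v3d_bin_bo_put() give callers a simple contract: the first get allocates the binner BO (and arms the out-of-memory interrupt), later gets only bump the kref, and the last put releases the BO. A sketch of how a caller is expected to pair them (a hypothetical caller, mirroring what vc4_gem.c does for job submission):

/* Hypothetical caller: hold a binner BO reference for the lifetime of
 * one piece of work.  exec->bin_bo_used records whether this user owns
 * a reference, so the put side runs at most once. */
static int run_binning_work(struct vc4_dev *vc4, struct vc4_exec_info *exec)
{
	int ret;

	ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
	if (ret)
		return ret;

	/* ... submit and wait for the binning job ... */

	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	return 0;
}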