Merge tag 'drm-misc-next-2020-09-21' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.10:

UAPI Changes:

Cross-subsystem Changes:
- virtio: Merged a PR for patches that will affect drm/virtio

Core Changes:
- dev: More devm_drm conversions and removal of drm_dev_init
- atomic: Split drm_atomic_helper_calc_timestamping_constants out of
  drm_atomic_helper_update_legacy_modeset_state
- ttm: More rework

Driver Changes:
- i915: selftests improvements
- panfrost: support for Amlogic SoCs
- vc4: one fix
- tree-wide: conversions to devm_drm_dev_alloc
- ast: simplifications of the atomic modesetting code
- panfrost: multiple fixes
- vc4: multiple fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20200921152956.2gxnsdgxmwhvjyut@gilmour.lan
@@ -36,6 +36,7 @@ struct dw_mipi_dsi_phy_ops {
 			  unsigned int *lane_mbps);
 	int (*get_timing)(void *priv_data, unsigned int lane_mbps,
 			  struct dw_mipi_dsi_dphy_timing *timing);
+	int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate);
 };
 
 struct dw_mipi_dsi_host_ops {
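For context, a hedged sketch of how platform glue might wire up the new optional callback (hypothetical example_* names; the reported value and its unit are assumptions, not taken from this diff):

static int example_get_esc_clk_rate(void *priv_data, unsigned int *esc_clk_rate)
{
	/* assumed value for this sketch; real glue derives it from its clocks */
	*esc_clk_rate = 20;
	return 0;
}

static const struct dw_mipi_dsi_phy_ops example_phy_ops = {
	/* .init, .get_lane_mbps, .get_timing as before */
	.get_esc_clk_rate = example_get_esc_clk_rate,
};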
@@ -74,6 +74,9 @@ void
 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
 					      struct drm_atomic_state *old_state);
 
+void
+drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state);
+
 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
 					       struct drm_atomic_state *state);
 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
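drm_atomic_helper_calc_timestamping_constants() is now callable on its own, so a driver with a custom commit tail no longer has to go through drm_atomic_helper_update_legacy_modeset_state() just for the vblank timestamping update. A minimal sketch (hypothetical example_commit_tail, otherwise standard helpers):

static void example_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* standalone step for drivers that skip the legacy state update */
	drm_atomic_helper_calc_timestamping_constants(old_state);

	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}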
@@ -92,7 +92,7 @@ struct drm_device {
	 * NULL.
	 *
	 * Instead of using this pointer it is recommended that drivers use
-	 * drm_dev_init() and embed struct &drm_device in their larger
+	 * devm_drm_dev_alloc() and embed struct &drm_device in their larger
	 * per-device structure.
	 */
	void *dev_private;
@@ -163,13 +163,12 @@ struct drm_driver {
	/**
	 * @load:
	 *
-	 * Backward-compatible driver callback to complete
-	 * initialization steps after the driver is registered. For
-	 * this reason, may suffer from race conditions and its use is
-	 * deprecated for new drivers. It is therefore only supported
-	 * for existing drivers not yet converted to the new scheme.
-	 * See drm_dev_init() and drm_dev_register() for proper and
-	 * race-free way to set up a &struct drm_device.
+	 * Backward-compatible driver callback to complete initialization steps
+	 * after the driver is registered. For this reason, may suffer from
+	 * race conditions and its use is deprecated for new drivers. It is
+	 * therefore only supported for existing drivers not yet converted to
+	 * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for
+	 * proper and race-free way to set up a &struct drm_device.
	 *
	 * This is deprecated, do not use!
	 *
@@ -589,13 +588,6 @@ struct drm_driver {
	int dev_priv_size;
 };
 
-int drm_dev_init(struct drm_device *dev,
-		 struct drm_driver *driver,
-		 struct device *parent);
-int devm_drm_dev_init(struct device *parent,
-		      struct drm_device *dev,
-		      struct drm_driver *driver);
-
 void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			    size_t size, size_t offset);
 
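With drm_dev_init() and devm_drm_dev_init() gone, drivers embed struct drm_device and allocate through the devm_drm_dev_alloc() macro, which wraps __devm_drm_dev_alloc() with the right size/offset pair. A minimal sketch (hypothetical example_device/example_driver names):

struct example_device {
	struct drm_device drm;		/* must be embedded */
	void __iomem *mmio;
};

static inline struct example_device *to_example_device(struct drm_device *drm)
{
	/* no dev_private needed, just upcast from the embedded member */
	return container_of(drm, struct example_device, drm);
}

static int example_probe(struct platform_device *pdev)
{
	struct example_device *edev;

	/* example_driver is an assumed struct drm_driver */
	edev = devm_drm_dev_alloc(&pdev->dev, &example_driver,
				  struct example_device, drm);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* ... map registers, init modesetting ... */
	return drm_dev_register(&edev->drm, 0);
}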
@@ -9,7 +9,6 @@
 #include <drm/drm_modes.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
 
 #include <linux/kernel.h> /* for container_of() */
 
@@ -20,9 +19,9 @@ struct drm_simple_display_pipe;
 struct filp;
 struct vm_area_struct;
 
-#define DRM_GEM_VRAM_PL_FLAG_VRAM	TTM_PL_FLAG_VRAM
-#define DRM_GEM_VRAM_PL_FLAG_SYSTEM	TTM_PL_FLAG_SYSTEM
-#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN	TTM_PL_FLAG_TOPDOWN
+#define DRM_GEM_VRAM_PL_FLAG_SYSTEM	(1 << 0)
+#define DRM_GEM_VRAM_PL_FLAG_VRAM	(1 << 1)
+#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN	(1 << 2)
 
 /*
  * Buffer-object helpers
@@ -101,9 +100,6 @@ u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
 int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
 int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
-void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
-			bool *is_iomem);
-void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
 void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo);
 void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
 
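With the kmap()/kunmap() pair gone, callers map the whole buffer with vmap() and balance it with vunmap(), passing the returned address back. A minimal sketch (hypothetical example):

static int example_access(struct drm_gem_vram_object *gbo)
{
	void *vaddr = drm_gem_vram_vmap(gbo);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	/* ... CPU access to the buffer ... */
	drm_gem_vram_vunmap(gbo, vaddr);
	return 0;
}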
@@ -338,7 +338,7 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 
 /**
  * drm_mm_nodes - list of nodes under the drm_mm range manager
- * @mm: the struct drm_mm range manger
+ * @mm: the struct drm_mm range manager
  *
  * As the drm_mm range manager hides its node_list deep with its
  * structure, extracting it looks painful and repetitive. This is
@@ -350,14 +350,15 @@ struct drm_display_mode {
	u8 type;
 
	/**
-	 * @private_flags:
+	 * @expose_to_userspace:
	 *
-	 * Driver private flags. private_flags can only be used for mode
-	 * objects passed to drivers in modeset operations. It shouldn't be used
-	 * by atomic drivers since they can store any additional data by
-	 * subclassing state structures.
+	 * Indicates whether the mode is to be exposed to the userspace.
+	 * This is to maintain a set of exposed modes while preparing
+	 * user-mode's list in drm_mode_getconnector ioctl. The purpose of
+	 * this only lies in the ioctl function, and is not to be used
+	 * outside the function.
	 */
-	int private_flags;
+	bool expose_to_userspace;
 
	/**
	 * @head:
@@ -366,19 +367,6 @@ struct drm_display_mode {
	 */
	struct list_head head;
 
-	/**
-	 * @export_head:
-	 *
-	 * struct list_head for modes to be exposed to the userspace.
-	 * This is to maintain a list of exposed modes while preparing
-	 * user-mode's list in drm_mode_getconnector ioctl. The purpose of this
-	 * list_head only lies in the ioctl function, and is not expected to be
-	 * used outside the function.
-	 * Once used, the stale pointers are not reset, but left as it is, to
-	 * avoid overhead of protecting it by mode_config.mutex.
-	 */
-	struct list_head export_head;
-
	/**
	 * @name:
	 *
@@ -88,7 +88,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+				       struct page **pages, unsigned int nr_pages);
 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags);
 
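Callers now pass the drm_device so the scatter/gather list can honour the device's DMA constraints. A minimal sketch of an updated call site (hypothetical example_gem wrapper type):

static struct sg_table *example_gem_get_sg_table(struct drm_gem_object *obj)
{
	struct example_gem *egem = container_of(obj, struct example_gem, base);

	/* dev is the new first argument */
	return drm_prime_pages_to_sg(obj->dev, egem->pages,
				     obj->size >> PAGE_SHIFT);
}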
@@ -141,7 +141,6 @@ struct ttm_buffer_object {
	struct ttm_resource mem;
	struct file *persistent_swap_storage;
	struct ttm_tt *ttm;
-	bool evicted;
	bool deleted;
 
	/**
@@ -151,7 +150,6 @@ struct ttm_buffer_object {
	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
-	struct list_head io_reserve_lru;
 
	/**
	 * Members protected by a bo reservation.
@@ -353,18 +351,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
 
-/**
- * ttm_bo_acc_size
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo_size: size of the buffer object in byte.
- * @struct_size: size of the structure holding buffer object datas
- *
- * Returns size to account for a buffer object
- */
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
-		       unsigned long bo_size,
-		       unsigned struct_size);
 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size);
@@ -77,8 +77,9 @@ struct ttm_bo_driver {
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
-	int (*ttm_tt_populate)(struct ttm_tt *ttm,
-			       struct ttm_operation_ctx *ctx);
+	int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
+			       struct ttm_tt *ttm,
+			       struct ttm_operation_ctx *ctx);
 
	/**
	 * ttm_tt_unpopulate
@@ -87,7 +88,43 @@ struct ttm_bo_driver {
	 *
	 * Free all backing page
	 */
-	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+	void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+
+	/**
+	 * ttm_tt_bind
+	 *
+	 * @bdev: Pointer to a ttm device
+	 * @ttm: Pointer to a struct ttm_tt.
+	 * @bo_mem: Pointer to a struct ttm_resource describing the
+	 * memory type and location for binding.
+	 *
+	 * Bind the backend pages into the aperture in the location
+	 * indicated by @bo_mem. This function should be able to handle
+	 * differences between aperture and system page sizes.
+	 */
+	int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem);
+
+	/**
+	 * ttm_tt_unbind
+	 *
+	 * @bdev: Pointer to a ttm device
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Unbind previously bound backend pages. This function should be
+	 * able to handle differences between aperture and system page sizes.
+	 */
+	void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+
+	/**
+	 * ttm_tt_destroy
+	 *
+	 * @bdev: Pointer to a ttm device
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Destroy the backend. This will be call back from ttm_tt_destroy so
+	 * don't call ttm_tt_destroy from the callback or infinite loop.
+	 */
+	void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
	/**
	 * struct ttm_bo_driver member eviction_valuable
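Driver callbacks now receive the ttm_bo_device explicitly instead of deriving it from the ttm_tt. A minimal sketch of an updated populate hook (hypothetical example name; assumes the driver delegates to the generic ttm_pool_populate() helper from ttm_page_alloc.h, which still exists at this point):

static int example_ttm_tt_populate(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_operation_ctx *ctx)
{
	if (ttm_tt_is_populated(ttm))
		return 0;

	/* delegate to the generic page-pool populate path */
	return ttm_pool_populate(ttm, ctx);
}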
@@ -356,23 +393,6 @@ struct ttm_lru_bulk_move {
	struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
 };
 
-/**
- * ttm_flag_masked
- *
- * @old: Pointer to the result and original value.
- * @new: New value of bits.
- * @mask: Mask of bits to change.
- *
- * Convenience function to change a number of bits identified by a mask.
- */
-static inline uint32_t
-ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
-{
-	*old ^= (*old ^ new) & mask;
-	return *old;
-}
-
 /*
  * ttm_bo.c
  */
@@ -441,11 +461,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  */
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
 
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_resource_manager *man);
-
 /**
  * ttm_bo_reserve:
  *
@@ -524,6 +539,29 @@ static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo
	spin_unlock(&ttm_bo_glob.lru_lock);
 }
 
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+				     struct ttm_resource *new_mem)
+{
+	bo->mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+/**
+ * ttm_bo_move_null = assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
+ *
+ * Assign the memory from new_mem to the memory of the buffer object bo.
+ */
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+				    struct ttm_resource *new_mem)
+{
+	struct ttm_resource *old_mem = &bo->mem;
+
+	WARN_ON(old_mem->mm_node != NULL);
+	ttm_bo_assign_mem(bo, new_mem);
+}
+
 /**
  * ttm_bo_unreserve
  *
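A minimal sketch of how a driver move callback can use the new inline for the no-copy case (hypothetical example; this mirrors the system-to-TT pattern several drivers use):

static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_resource *new_mem)
{
	if (bo->mem.mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		/* nothing to copy, just adopt the new resource */
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}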
@@ -604,6 +642,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
  * @new_mem: struct ttm_resource indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -615,23 +654,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
+			      bool pipeline,
			      struct ttm_resource *new_mem);
 
-/**
- * ttm_bo_pipeline_move.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
- */
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_resource *new_mem);
-
 /**
  * ttm_bo_pipeline_gutting.
  *
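Former ttm_bo_pipeline_move() callers fold into the extended ttm_bo_move_accel_cleanup(). A minimal sketch (hypothetical wrapper):

static int example_move_accel(struct ttm_buffer_object *bo, bool evict,
			      struct dma_fence *fence,
			      struct ttm_resource *new_mem)
{
	/* pipeline = true: the old memory is freed asynchronously, as
	 * ttm_bo_pipeline_move() used to do */
	return ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
}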
@@ -652,13 +677,30 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
  */
 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
+/**
+ * ttm_bo_tt_bind
+ *
+ * Bind the object tt to a memory resource.
+ */
+int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
+
+/**
+ * ttm_bo_tt_bind
+ *
+ * Unbind the object tt from a memory resource.
+ */
+void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_tt_destroy.
+ */
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+
 /**
  * ttm_range_man_init
  *
  * @bdev: ttm device
  * @type: memory manager type
- * @available_caching: TTM_PL_FLAG_* for allowed caching modes
- * @default_caching: default caching mode
  * @use_tt: if the memory manager uses tt
  * @p_size: size of area to be managed in pages.
  *
@@ -666,10 +708,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
  * The range manager is installed for this device in the type slot.
  */
 int ttm_range_man_init(struct ttm_bo_device *bdev,
-		       unsigned type,
-		       uint32_t available_caching,
-		       uint32_t default_caching,
-		       bool use_tt,
+		       unsigned type, bool use_tt,
		       unsigned long p_size);
 
 /**
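A minimal sketch of an updated init call (hypothetical names and sizes; the caching arguments simply disappear from the call site):

static int example_vram_mm_init(struct ttm_bo_device *bdev,
				resource_size_t vram_size)
{
	/* no available/default caching arguments anymore */
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false,
				  vram_size >> PAGE_SHIFT);
}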
@@ -58,9 +58,8 @@ struct ttm_validate_buffer {
  * Undoes all buffer validation reservations for bos pointed to by
  * the list entries.
  */
-
-extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
-				       struct list_head *list);
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list);
 
 /**
  * function ttm_eu_reserve_buffers
@@ -96,10 +95,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * ttm_eu_fence_buffer_objects() when command submission is complete or
  * has failed.
  */
-
-extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-				  struct list_head *list, bool intr,
-				  struct list_head *dups);
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list, bool intr,
+			   struct list_head *dups);
 
 /**
  * function ttm_eu_fence_buffer_objects.
@@ -113,9 +111,8 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
  * It also unreserves all buffers, putting them on lru lists.
  *
  */
-
-extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
-					struct list_head *list,
-					struct dma_fence *fence);
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list,
+				 struct dma_fence *fence);
 
 #endif
@@ -79,19 +79,17 @@ extern struct ttm_mem_global {
 #endif
 } ttm_mem_glob;
 
-extern int ttm_mem_global_init(struct ttm_mem_global *glob);
-extern void ttm_mem_global_release(struct ttm_mem_global *glob);
-extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-				struct ttm_operation_ctx *ctx);
-extern void ttm_mem_global_free(struct ttm_mem_global *glob,
-				uint64_t amount);
-extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-				     struct page *page, uint64_t size,
-				     struct ttm_operation_ctx *ctx);
-extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-				     struct page *page, uint64_t size);
-extern size_t ttm_round_pot(size_t size);
-extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
-extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
-				       uint64_t num_pages, struct ttm_operation_ctx *ctx);
+int ttm_mem_global_init(struct ttm_mem_global *glob);
+void ttm_mem_global_release(struct ttm_mem_global *glob);
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+			 struct ttm_operation_ctx *ctx);
+void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+			      struct page *page, uint64_t size,
+			      struct ttm_operation_ctx *ctx);
+void ttm_mem_global_free_page(struct ttm_mem_global *glob,
+			      struct page *page, uint64_t size);
+size_t ttm_round_pot(size_t size);
+bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
+				struct ttm_operation_ctx *ctx);
 #endif
@@ -42,12 +42,6 @@
 #define TTM_PL_VRAM		2
 #define TTM_PL_PRIV		3
 
-#define TTM_PL_FLAG_SYSTEM	(1 << TTM_PL_SYSTEM)
-#define TTM_PL_FLAG_TT		(1 << TTM_PL_TT)
-#define TTM_PL_FLAG_VRAM	(1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV	(1 << TTM_PL_PRIV)
-#define TTM_PL_MASK_MEM		0x0000FFFF
-
 /*
  * Other flags that affects data placement.
  * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
@@ -71,8 +65,6 @@
			 TTM_PL_FLAG_UNCACHED | \
			 TTM_PL_FLAG_WC)
 
-#define TTM_PL_MASK_MEMTYPE	(TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
-
 /**
  * struct ttm_place
  *
@@ -85,6 +77,7 @@
 struct ttm_place {
	unsigned	fpfn;
	unsigned	lpfn;
+	uint32_t	mem_type;
	uint32_t	flags;
 };
 
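Placements now name the memory type explicitly instead of encoding it in TTM_PL_FLAG_* bits. A minimal sketch (hypothetical placement; TTM_PL_FLAG_CACHED is still a caching flag at this point):

static const struct ttm_place example_vram_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,	/* was TTM_PL_FLAG_VRAM in .flags */
	.flags = TTM_PL_FLAG_CACHED,
};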
@@ -108,15 +108,7 @@ struct ttm_resource_manager_func {
  * @gpu_offset: If used, the GPU offset of the first managed page of
  * fixed memory or the first managed location in an aperture.
  * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
  * @func: structure pointer implementing the range manager. See above
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- * reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
  * @move_lock: lock for move fence
  * static information. bdev::driver::io_mem_free is never used.
  * @lru: The lru list for this memory type.
@@ -131,19 +123,9 @@ struct ttm_resource_manager {
	bool use_type;
	bool use_tt;
	uint64_t size;
-	uint32_t available_caching;
-	uint32_t default_caching;
	const struct ttm_resource_manager_func *func;
-	struct mutex io_reserve_mutex;
-	bool use_io_reserve_lru;
	spinlock_t move_lock;
 
-	/*
-	 * Protected by @io_reserve_mutex:
-	 */
-
-	struct list_head io_reserve_lru;
-
	/*
	 * Protected by the global->lru_lock.
	 */
@@ -160,21 +142,15 @@ struct ttm_resource_manager {
  * struct ttm_bus_placement
  *
  * @addr:		mapped virtual address
- * @base:		bus base address
- * @offset:		offset from the base address
+ * @offset:		physical addr
  * @is_iomem:		is this io memory ?
- * @io_reserved_vm:	The VM system has a refcount in @io_reserved_count
- * @io_reserved_count:	Refcounting the numbers of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
 struct ttm_bus_placement {
	void		*addr;
-	phys_addr_t	base;
-	unsigned long	offset;
+	phys_addr_t	offset;
	bool		is_iomem;
-	bool		io_reserved_vm;
-	uint64_t	io_reserved_count;
 };
 
 /**
@@ -42,54 +42,17 @@ struct ttm_operation_ctx;
 #define TTM_PAGE_FLAG_SG		(1 << 8)
 #define TTM_PAGE_FLAG_NO_RETRY		(1 << 9)
 
+#define TTM_PAGE_FLAG_PRIV_POPULATED	(1 << 31)
+
 enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
 };
 
-struct ttm_backend_func {
-	/**
-	 * struct ttm_backend_func member bind
-	 *
-	 * @ttm: Pointer to a struct ttm_tt.
-	 * @bo_mem: Pointer to a struct ttm_resource describing the
-	 * memory type and location for binding.
-	 *
-	 * Bind the backend pages into the aperture in the location
-	 * indicated by @bo_mem. This function should be able to handle
-	 * differences between aperture and system page sizes.
-	 */
-	int (*bind) (struct ttm_tt *ttm, struct ttm_resource *bo_mem);
-
-	/**
-	 * struct ttm_backend_func member unbind
-	 *
-	 * @ttm: Pointer to a struct ttm_tt.
-	 *
-	 * Unbind previously bound backend pages. This function should be
-	 * able to handle differences between aperture and system page sizes.
-	 */
-	void (*unbind) (struct ttm_tt *ttm);
-
-	/**
-	 * struct ttm_backend_func member destroy
-	 *
-	 * @ttm: Pointer to a struct ttm_tt.
-	 *
-	 * Destroy the backend. This will be call back from ttm_tt_destroy so
-	 * don't call ttm_tt_destroy from the callback or infinite loop.
-	 */
-	void (*destroy) (struct ttm_tt *ttm);
-};
-
 /**
  * struct ttm_tt
  *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * pointer.
  * @pages: Array of pages backing the data.
  * @num_pages: Number of pages in the page array.
  * @bdev: Pointer to the current struct ttm_bo_device.
@@ -103,21 +66,29 @@ struct ttm_backend_func {
  * memory.
  */
 struct ttm_tt {
-	struct ttm_bo_device *bdev;
-	struct ttm_backend_func *func;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
-	enum {
-		tt_bound,
-		tt_unbound,
-		tt_unpopulated,
-	} state;
 };
 
+static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
+{
+	return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+}
+
+static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
+{
+	tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+}
+
+static inline void ttm_tt_set_populated(struct ttm_tt *tt)
+{
+	tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
+}
+
 /**
  * struct ttm_dma_tt
  *
@@ -175,17 +146,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 void ttm_tt_fini(struct ttm_tt *ttm);
 void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_resource identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
-		struct ttm_operation_ctx *ctx);
-
 /**
  * ttm_ttm_destroy:
  *
@@ -193,16 +153,14 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
  *
  * Unbind, unpopulate and destroy common struct ttm_tt.
  */
-void ttm_tt_destroy(struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 /**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
+ * ttm_tt_destroy_common:
  *
- * Unbind a struct ttm_tt.
+ * Called from driver to destroy common path.
  */
-void ttm_tt_unbind(struct ttm_tt *ttm);
+void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 /**
  * ttm_tt_swapin:
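A minimal sketch of a driver backend destroy hook using the new common teardown (hypothetical example; the driver frees its own state after the shared path runs):

static void example_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_destroy_common(bdev, ttm);
	kfree(ttm);
}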
@@ -227,7 +185,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm);
  * and cache flushes and potential page splitting / combining.
  */
 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage);
 
 /**
  * ttm_tt_populate - allocate pages for a ttm
@@ -236,7 +194,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
  *
  * Calls the driver method to allocate pages for a ttm
  */
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_tt_unpopulate - free pages from a ttm
@@ -245,7 +203,7 @@ int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
  *
  * Calls the driver method to free all pages from a ttm
  */
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
+void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 #if IS_ENABLED(CONFIG_AGP)
 #include <linux/agp_backend.h>
@@ -265,8 +223,10 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
 struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
				 struct agp_bridge_data *bridge,
				 uint32_t page_flags);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
+void ttm_agp_unbind(struct ttm_tt *ttm);
+void ttm_agp_destroy(struct ttm_tt *ttm);
+bool ttm_agp_is_bound(struct ttm_tt *ttm);
 #endif
 
 #endif