drm/i915: Move object->pages API to i915_gem_object.[ch]
Currently the code for manipulating the pages on an object is still residing in i915_gem.c, move it to i915_gem_object.c Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-3-chris@chris-wilson.co.uk
This commit is contained in:
90
drivers/gpu/drm/i915/gem/i915_gem_object.c
Normal file
90
drivers/gpu/drm/i915/gem/i915_gem_object.c
Normal file
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Copyright © 2017 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "i915_gem_object.h"
|
||||
#include "i915_globals.h"
|
||||
|
||||
/*
 * Module-global state for the GEM object slab cache.
 *
 * This is a tentative definition (no initializer); the initialized
 * definition with the shrink/exit callbacks appears further below, after
 * those callbacks are declared. Both refer to the same object.
 */
static struct i915_global_object {
	struct i915_global base;
	struct kmem_cache *slab_objects;
} global;
|
||||
|
||||
struct drm_i915_gem_object *i915_gem_object_alloc(void)
|
||||
{
|
||||
return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
|
||||
}
|
||||
|
||||
void i915_gem_object_free(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
return kmem_cache_free(global.slab_objects, obj);
|
||||
}
|
||||
|
||||
/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	obj->cache_level = cache_level;

	/*
	 * A cached object is coherent for both CPU reads and writes; on an
	 * LLC platform even an uncached object still snoops CPU reads.
	 */
	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(to_i915(obj->base.dev)))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	/*
	 * If CPU writes are not coherent, the cachelines must be assumed
	 * dirty and flushed before the GPU can rely on the contents.
	 */
	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
|
||||
|
||||
/* i915_global shrink callback: release unused slabs back to the system. */
static void i915_global_objects_shrink(void)
{
	kmem_cache_shrink(global.slab_objects);
}
|
||||
|
||||
/* i915_global exit callback: tear down the slab cache on module unload. */
static void i915_global_objects_exit(void)
{
	kmem_cache_destroy(global.slab_objects);
}
|
||||
|
||||
/*
 * Initialized definition of the tentative "global" above, wiring the
 * shrink/exit callbacks into the embedded i915_global base.
 */
static struct i915_global_object global = { {
	.shrink = i915_global_objects_shrink,
	.exit = i915_global_objects_exit,
} };
|
||||
|
||||
/*
 * Create the GEM object slab cache and register it with the i915 globals
 * machinery. Returns 0 on success, -ENOMEM if the cache cannot be created.
 */
int __init i915_global_objects_init(void)
{
	struct kmem_cache *slab;

	slab = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab)
		return -ENOMEM;

	global.slab_objects = slab;
	i915_global_register(&global.base);
	return 0;
}
|
350
drivers/gpu/drm/i915/gem/i915_gem_object.h
Normal file
350
drivers/gpu/drm/i915/gem/i915_gem_object.h
Normal file
@@ -0,0 +1,350 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Copyright © 2016 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __I915_GEM_OBJECT_H__
|
||||
#define __I915_GEM_OBJECT_H__
|
||||
|
||||
#include <drm/drm_gem.h>
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/drm_device.h>
|
||||
|
||||
#include <drm/i915_drm.h>
|
||||
|
||||
#include "i915_gem_object_types.h"
|
||||
|
||||
struct drm_i915_gem_object *i915_gem_object_alloc(void);
|
||||
void i915_gem_object_free(struct drm_i915_gem_object *obj);
|
||||
|
||||
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

/*
 * Reference-taking variant of the RCU lookup: returns NULL if the handle
 * is unknown or the object's refcount has already dropped to zero.
 * The caller owns a reference and must drop it with i915_gem_object_put().
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

/* Discourage the raw DRM lookup in i915 code; use the wrappers above. */
__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
|
||||
|
||||
/* Take an additional reference on @obj; returns @obj for call chaining. */
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

/* Drop a reference on @obj previously taken with i915_gem_object_get(). */
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

/* Lock the object's reservation object (no ww acquire context). */
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

/* Unlock the object's reservation object. */
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
|
||||
|
||||
/*
 * Mark the object read-only via its GEM vma node. NOTE(review): there is
 * no clear operation here, so this appears to be one-way — confirm.
 */
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

/* Query whether the object has been marked read-only. */
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}
|
||||
|
||||
/* Backing storage consists of struct pages (per the object ops' flags). */
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

/* Object may be reaped by the shrinker (per the object ops' flags). */
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

/* Object is a proxy for backing storage owned elsewhere. */
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

/* Object requires asynchronous cancellation on release. */
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}
|
||||
|
||||
/* True while the object is referenced by in-flight GPU work. */
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

/* Query the "free when GPU activity completes" flag. */
static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

/*
 * Set the active-reference flag. Uses the non-atomic __set_bit, which is
 * safe here because updates are serialized by struct_mutex (asserted).
 */
static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

/* Clear the active-reference flag; same struct_mutex serialization. */
static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

/* Drop the final reference unless the object is still active on the GPU. */
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
|
||||
|
||||
/* True while any framebuffer still wraps this object (racy snapshot). */
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

/* Extract the tiling mode from the packed tiling_and_stride field. */
static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

/* Any tiling mode other than I915_TILING_NONE counts as tiled. */
static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

/* Extract the stride (bytes per row) from the packed field. */
static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

/*
 * Rows per tile for a given tiling mode: Y tiles are 32 rows tall,
 * X tiles 8. @tiling must not be I915_TILING_NONE.
 */
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

/* Tile height for the object's current tiling mode. */
static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

/* Bytes spanned by one full row of tiles: stride * tile height. */
static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

/* Change the object's tiling mode and stride (implemented elsewhere). */
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);
|
||||
|
||||
/*
 * Accessors for the object's backing pages. NOTE(review): these presumably
 * require the pages to be pinned by the caller — confirm against the
 * definitions in i915_gem.c.
 */
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

/* As get_page, but also marks the page dirty. */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

/* Install @pages as the object's backing store. */
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);
/* Acquire the backing pages (slow path of i915_gem_object_pin_pages). */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
|
||||
|
||||
/*
 * Pin the object's backing pages, acquiring them if necessary.
 * Returns 0 on success or a negative error code.
 */
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	/* Fast path: pages already pinned, just bump the count. */
	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	/* Slow path: acquire the pages (takes obj->mm.lock). */
	return __i915_gem_object_get_pages(obj);
}

/* Lockless check that backing pages exist (and are not an ERR_PTR). */
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

/* Take an extra pin; caller must already hold pages (asserted). */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

/* True while anyone holds a pin on the backing pages. */
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

/* Release one pin; pages must exist and be pinned (asserted). */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

/* Public unpin; currently identical to the double-underscore variant. */
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
|
||||
|
||||
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

/* Release the backing pages; @subclass distinguishes shrinker recursion. */
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass);
/* Discard the object's backing storage. */
void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
|
||||
|
||||
/* Cache attribute for the kernel mapping; OVERRIDE forces a remap. */
enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);
|
||||
|
||||
/* Flush a byte range of the kernel mapping back to memory. */
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
/* Convenience wrapper: flush the entire object. */
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
|
||||
|
||||
/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	/* The map pin is accounted via the page pin count. */
	i915_gem_object_unpin_pages(obj);
}
|
||||
|
||||
/*
 * Return the engine of the last unsignaled i915 write fence on @obj,
 * or NULL if there is none. NOTE(review): the fence is dereferenced
 * after rcu_read_unlock(), which is only safe if
 * reservation_object_get_excl_rcu() returns a full reference (dropped
 * via dma_fence_put below) — confirm. The result is inherently racy:
 * the fence may signal immediately after this check.
 */
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
|
||||
|
||||
/* Update the object's CPU/GPU coherency tracking for @cache_level. */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
/* Flush CPU caches if the object is used by the display engine. */
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

/* Release shmem backing store; @needs_clflush per the caller's analysis. */
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);
|
||||
|
||||
#endif
|
Reference in New Issue
Block a user