Merge airlied/drm-next into drm-intel-next-queued

Commit aee3bac0a3a8 ("drm/i915/psr: Tie PSR2 support to Y
coordinate requirement") was merged to drm-intel-next-queued,
but the macro it depends on was added by commit c5fe47327b06
("drm: Add PSR version 3 macro"), which was merged through
drm-misc.

So backmerge to get drm-intel-next-queued compiling again.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Rodrigo Vivi committed on 2018-03-30 11:52:33 -07:00
2238 changed files with 138992 additions and 28596 deletions


@@ -45,6 +45,7 @@ enum amd_asic_type {
CHIP_POLARIS11,
CHIP_POLARIS12,
CHIP_VEGA10,
CHIP_VEGA12,
CHIP_RAVEN,
CHIP_LAST,
};


@@ -13,6 +13,8 @@
#include <drm/drm_crtc.h>
struct analogix_dp_device;
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
@@ -29,6 +31,7 @@ struct analogix_dp_plat_data {
struct drm_panel *panel;
struct drm_encoder *encoder;
struct drm_connector *connector;
bool skip_connector;
int (*power_on)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
@@ -38,16 +41,17 @@ struct analogix_dp_plat_data {
struct drm_connector *);
};
int analogix_dp_psr_supported(struct device *dev);
int analogix_dp_enable_psr(struct device *dev);
int analogix_dp_disable_psr(struct device *dev);
int analogix_dp_psr_enabled(struct analogix_dp_device *dp);
int analogix_dp_enable_psr(struct analogix_dp_device *dp);
int analogix_dp_disable_psr(struct analogix_dp_device *dp);
int analogix_dp_resume(struct device *dev);
int analogix_dp_suspend(struct device *dev);
int analogix_dp_resume(struct analogix_dp_device *dp);
int analogix_dp_suspend(struct analogix_dp_device *dp);
int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
struct analogix_dp_plat_data *plat_data);
void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
struct analogix_dp_device *
analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
struct analogix_dp_plat_data *plat_data);
void analogix_dp_unbind(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
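
The analogix_dp entry points above now take the analogix_dp_device handle
returned by analogix_dp_bind() rather than a bare struct device. A minimal
caller-side sketch, assuming the bind call reports failure via ERR_PTR
(the surrounding names are illustrative, not from this commit):

	struct analogix_dp_device *adp;

	adp = analogix_dp_bind(dev, drm_dev, &plat_data);
	if (IS_ERR(adp))
		return PTR_ERR(adp);

	/* ... on teardown, hand the same handle back ... */
	analogix_dp_unbind(adp);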


@@ -753,6 +753,28 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(old_plane_state) = (__state)->planes[__i].old_state,\
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
* for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic
* update in reverse order
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @old_plane_state: &struct drm_plane_state iteration cursor for the old state
* @new_plane_state: &struct drm_plane_state iteration cursor for the new state
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update in reverse order,
* tracking both old and new state. This is useful in places where the
* state delta needs to be considered, for example in atomic check functions.
*/
#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
(__i) >= 0; \
(__i)--) \
for_each_if ((__state)->planes[__i].ptr && \
((plane) = (__state)->planes[__i].ptr, \
(old_plane_state) = (__state)->planes[__i].old_state,\
(new_plane_state) = (__state)->planes[__i].new_state, 1))
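
A brief usage sketch of the new reverse iterator (the helper below is
illustrative, not part of this commit):

	#include <drm/drm_atomic.h>

	/* count planes whose framebuffer changed, walking the plane array
	 * from the last slot back to the first */
	static int example_count_changed_fbs(struct drm_atomic_state *state)
	{
		struct drm_plane *plane;
		struct drm_plane_state *old_plane_state, *new_plane_state;
		int i, changed = 0;

		for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state,
						       new_plane_state, i) {
			if (old_plane_state->fb != new_plane_state->fb)
				changed++;
		}

		return changed;
	}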
/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer


@@ -40,7 +40,6 @@ int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
const struct drm_rect *clip,
int min_scale,
int max_scale,
bool can_position,


@@ -26,6 +26,7 @@
#include <linux/ctype.h>
struct drm_crtc;
struct drm_plane;
uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
@@ -37,4 +38,34 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
/**
* drm_color_lut_size - calculate the number of entries in the LUT
* @blob: blob containing the LUT
*
* Returns:
* The number of entries in the color LUT stored in @blob.
*/
static inline int drm_color_lut_size(const struct drm_property_blob *blob)
{
return blob->length / sizeof(struct drm_color_lut);
}
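
A short usage sketch of the new helper (the wrapper below is illustrative,
not part of this commit):

	#include <drm/drm_color_mgmt.h>
	#include <drm/drm_crtc.h>

	/* number of entries in the gamma LUT attached to a CRTC state */
	static int example_gamma_lut_entries(const struct drm_crtc_state *crtc_state)
	{
		if (!crtc_state->gamma_lut)
			return 0;

		return drm_color_lut_size(crtc_state->gamma_lut);
	}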
enum drm_color_encoding {
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_BT2020,
DRM_COLOR_ENCODING_MAX,
};
enum drm_color_range {
DRM_COLOR_YCBCR_LIMITED_RANGE,
DRM_COLOR_YCBCR_FULL_RANGE,
DRM_COLOR_RANGE_MAX,
};
int drm_plane_create_color_properties(struct drm_plane *plane,
u32 supported_encodings,
u32 supported_ranges,
enum drm_color_encoding default_encoding,
enum drm_color_range default_range);
#endif
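
A hedged sketch of how a driver might expose these properties on a
YUV-capable plane; the chosen encodings and ranges are an example, not a
requirement of this interface:

	#include <linux/bitops.h>
	#include <drm/drm_color_mgmt.h>
	#include <drm/drm_plane.h>

	static int example_attach_color_props(struct drm_plane *plane)
	{
		return drm_plane_create_color_properties(plane,
							 BIT(DRM_COLOR_YCBCR_BT601) |
							 BIT(DRM_COLOR_YCBCR_BT709),
							 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
							 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
							 DRM_COLOR_YCBCR_BT709,
							 DRM_COLOR_YCBCR_LIMITED_RANGE);
	}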


@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
#endif
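
The new helper lets a connector ->detect() implementation tell whether it
was invoked from the output-poll worker, which matters for runtime PM. A
hedged sketch of the intended pattern (the driver names are illustrative):

	#include <linux/pm_runtime.h>
	#include <drm/drm_crtc_helper.h>

	static enum drm_connector_status
	example_detect(struct drm_connector *connector, bool force)
	{
		enum drm_connector_status status = connector_status_connected;

		/* don't wake the device when called from the poll worker */
		if (!drm_kms_helper_is_poll_worker()) {
			int ret = pm_runtime_get_sync(connector->dev->dev);

			if (ret < 0 && ret != -EACCES)
				return connector_status_disconnected;
		}

		/* ... probe the hardware and update status here ... */

		if (!drm_kms_helper_is_poll_worker())
			pm_runtime_put(connector->dev->dev);

		return status;
	}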


@@ -288,6 +288,7 @@
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
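
This is the DPCD capability value the i915 change from the commit message
keys off. A minimal check, assuming psr_dpcd[0] holds the byte read from
DP_PSR_SUPPORT (the helper name is illustrative):

	#include <drm/drm_dp_helper.h>

	static bool example_sink_has_psr2_y_coord(const u8 *psr_dpcd)
	{
		/* eDP 1.4a sinks report PSR2 with Y-coordinate as value 3 */
		return psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED;
	}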


@@ -56,6 +56,7 @@ struct drm_printer;
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
#define DRIVER_SYNCOBJ 0x40000
#define DRIVER_PREFER_XBGR_30BPP 0x80000
/**
* struct drm_driver - DRM driver structure


@@ -120,30 +120,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_get(struct drm_mode_object *obj);
void drm_mode_object_put(struct drm_mode_object *obj);
/**
* drm_mode_object_reference - acquire a mode object reference
* @obj: DRM mode object
*
* This is a compatibility alias for drm_mode_object_get() and should not be
* used by new code.
*/
static inline void drm_mode_object_reference(struct drm_mode_object *obj)
{
drm_mode_object_get(obj);
}
/**
* drm_mode_object_unreference - release a mode object reference
* @obj: DRM mode object
*
* This is a compatibility alias for drm_mode_object_put() and should not be
* used by new code.
*/
static inline void drm_mode_object_unreference(struct drm_mode_object *obj)
{
drm_mode_object_put(obj);
}
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);


@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_color_mgmt.h>
struct drm_crtc;
struct drm_printer;
@@ -112,6 +113,20 @@ struct drm_plane_state {
unsigned int zpos;
unsigned int normalized_zpos;
/**
* @color_encoding:
*
* Color encoding for non RGB formats
*/
enum drm_color_encoding color_encoding;
/**
* @color_range:
*
* Color range for non RGB formats
*/
enum drm_color_range color_range;
/* Clipped coordinates */
struct drm_rect src, dst;
@@ -558,6 +573,23 @@ struct drm_plane {
struct drm_property *zpos_property;
struct drm_property *rotation_property;
/**
* @color_encoding_property:
*
* Optional "COLOR_ENCODING" enum property for specifying
* color encoding for non RGB formats.
* See drm_plane_create_color_properties().
*/
struct drm_property *color_encoding_property;
/**
* @color_range_property:
*
* Optional "COLOR_RANGE" enum property for specifying
* color range for non RGB formats.
* See drm_plane_create_color_properties().
*/
struct drm_property *color_range_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)


@@ -43,7 +43,6 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dest,
const struct drm_rect *clip,
unsigned int rotation,
int min_scale,
int max_scale,


@@ -196,21 +196,22 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_STATE 0x40
#define DRM_UT_LEASE 0x80
__printf(6, 7)
void drm_dev_printk(const struct device *dev, const char *level,
unsigned int category, const char *function_name,
const char *prefix, const char *format, ...);
__printf(3, 4)
void drm_printk(const char *level, unsigned int category,
const char *format, ...);
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
__printf(3, 4)
void drm_dev_dbg(const struct device *dev, unsigned int category,
const char *format, ...);
__printf(2, 3)
void drm_dbg(unsigned int category, const char *format, ...);
__printf(1, 2)
void drm_err(const char *format, ...);
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
do { \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, \
##__VA_ARGS__); \
} while (0)
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
@@ -233,10 +234,9 @@ void drm_printk(const char *level, unsigned int category,
* @fmt: printf() like format string.
*/
#define DRM_DEV_ERROR(dev, fmt, ...) \
drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
fmt, ##__VA_ARGS__)
drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
#define DRM_ERROR(fmt, ...) \
drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -257,8 +257,7 @@ void drm_printk(const char *level, unsigned int category,
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \
##__VA_ARGS__)
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
@@ -275,53 +274,46 @@ void drm_printk(const char *level, unsigned int category,
* @dev: device pointer
* @fmt: printf() like format string.
*/
#define DRM_DEV_DEBUG(dev, fmt, args...) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
##args)
#define DRM_DEV_DEBUG(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
#define DRM_DEBUG(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \
fmt, ##args)
#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_DRIVER(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \
##args)
#define DRM_DEBUG_KMS(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_KMS(fmt, ...) \
drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \
fmt, ##args)
#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_PRIME(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \
fmt, ##args)
#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_ATOMIC(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \
##args)
#define DRM_DEBUG_VBL(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_VBL(fmt, ...) \
drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_LEASE(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \
__func__, "", fmt, ##args); \
drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
})
/**
@@ -330,21 +322,28 @@ void drm_printk(const char *level, unsigned int category,
* @dev: device pointer
* @fmt: printf() like format string.
*/
#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \
DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
#define DRM_DEBUG_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
_DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
fmt, ##__VA_ARGS__)
#define DRM_DEBUG_RATELIMITED(fmt, ...) \
DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
fmt, ##__VA_ARGS__)
#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
fmt, ##__VA_ARGS__)
#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
fmt, ##__VA_ARGS__)
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#endif /* DRM_PRINT_H_ */
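
Call sites are unchanged by this rework; the macros simply route through the
new drm_dev_dbg()/drm_dbg()/drm_err() entry points. A small illustrative
sketch (the function and messages are not from this commit):

	#include <linux/device.h>
	#include <drm/drm_print.h>

	static void example_log(struct device *dev, int w, int h)
	{
		/* per-device debug now funnels through drm_dev_dbg() */
		DRM_DEV_DEBUG_KMS(dev, "probing %dx%d\n", w, h);

		/* deviceless variants route through drm_dbg()/drm_err() */
		DRM_DEBUG_ATOMIC("commit queued\n");
		DRM_ERROR("mode rejected\n");
	}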


@@ -209,7 +209,7 @@ struct drm_property_blob {
struct list_head head_global;
struct list_head head_file;
size_t length;
unsigned char data[];
void *data;
};
struct drm_prop_enum_list {
@@ -237,27 +237,29 @@ static inline bool drm_property_type_is(struct drm_property *property,
return property->flags & type;
}
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
struct drm_property *drm_property_create(struct drm_device *dev,
u32 flags, const char *name,
int num_values);
struct drm_property *drm_property_create_enum(struct drm_device *dev,
u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_props,
uint64_t supported_bits);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
struct drm_property *drm_property_create_range(struct drm_device *dev,
u32 flags, const char *name,
uint64_t min, uint64_t max);
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int flags, const char *name,
u32 flags, const char *name,
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type);
struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
const char *name);
u32 flags, const char *name,
uint32_t type);
struct drm_property *drm_property_create_bool(struct drm_device *dev,
u32 flags, const char *name);
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
void drm_property_destroy(struct drm_device *dev, struct drm_property *property);


@@ -24,9 +24,30 @@ struct drm_simple_display_pipe_funcs {
/**
* @mode_valid:
*
* This function is called to filter out valid modes from the
* suggestions suggested by the bridge or display. This optional
* hook is passed in when initializing the pipeline.
* This callback is used to check if a specific mode is valid in the
* crtc used in this simple display pipe. This should be implemented
* if the display pipe has some sort of restriction in the modes
* it can display. For example, a given display pipe may be responsible
* to set a clock value. If the clock can not produce all the values
* for the available modes then this callback can be used to restrict
* the number of modes to only the ones that can be displayed. Another
* reason can be bandwidth mitigation: the memory port on the display
* controller can have bandwidth limitations not allowing pixel data
* to be fetched at any rate.
*
* This hook is used by the probe helpers to filter the mode list in
* drm_helper_probe_single_connector_modes(), and it is used by the
* atomic helpers to validate modes supplied by userspace in
* drm_atomic_helper_check_modeset().
*
* This function is optional.
*
* NOTE:
*
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
* against configuration-invariant hardware constraints.
*
* RETURNS:
*
@@ -107,6 +128,24 @@ struct drm_simple_display_pipe_funcs {
*/
void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
/**
* @enable_vblank:
*
* Optional, called by &drm_crtc_funcs.enable_vblank. Please read
* the documentation for the &drm_crtc_funcs.enable_vblank hook for
* more details.
*/
int (*enable_vblank)(struct drm_simple_display_pipe *pipe);
/**
* @disable_vblank:
*
* Optional, called by &drm_crtc_funcs.disable_vblank. Please read
* the documentation for the &drm_crtc_funcs.disable_vblank hook for
* more details.
*/
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
};
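
A hedged sketch of a driver wiring up the new vblank hooks (names are
illustrative; the pre-existing hooks are elided):

	#include <drm/drm_simple_kms_helper.h>

	static int example_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
	{
		/* unmask the hardware vblank interrupt here */
		return 0;
	}

	static void example_pipe_disable_vblank(struct drm_simple_display_pipe *pipe)
	{
		/* mask the hardware vblank interrupt here */
	}

	static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
		/* .enable, .disable, .update, ... as before */
		.enable_vblank = example_pipe_enable_vblank,
		.disable_vblank = example_pipe_disable_vblank,
	};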
/**


@@ -41,6 +41,8 @@
#include <linux/bitmap.h>
#include <linux/reservation.h>
struct ttm_bo_global;
struct ttm_bo_device;
struct drm_mm_node;
@@ -169,7 +171,6 @@ struct ttm_buffer_object {
* Members constant at init.
*/
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
@@ -263,8 +264,8 @@ struct ttm_bo_kmap_obj {
*
* @interruptible: Sleep interruptible if sleeping.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @allow_reserved_eviction: Allow eviction of reserved BOs.
* @resv: Reservation object to allow reserved evictions with.
* @flags: Including the following flags
*
* Context for TTM operations like changing buffer placement or general memory
* allocation.
@@ -272,11 +273,16 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
bool allow_reserved_eviction;
struct reservation_object *resv;
uint64_t bytes_moved;
uint32_t flags;
};
/* Allow eviction of reserved BOs */
#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
/* when serving page fault or suspend, allow alloc anyway */
#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
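
Callers that previously set allow_reserved_eviction now request the same
behaviour through the new flags field. A minimal sketch (the wrapper is
illustrative, not from this commit):

	#include <drm/ttm/ttm_bo_api.h>

	static int example_validate(struct ttm_buffer_object *bo,
				    struct ttm_placement *placement)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT,
		};

		return ttm_bo_validate(bo, placement, &ctx);
	}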
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
@@ -461,11 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
* @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
@@ -498,7 +499,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
struct reservation_object *resv,
@@ -515,7 +515,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
@@ -545,8 +544,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
unsigned long size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment, bool interrubtible,
struct file *persistent_swap_storage, size_t acc_size,
uint32_t page_alignment, bool interrubtible, size_t acc_size,
struct sg_table *sg, struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *));
@@ -560,11 +558,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @p_bo: On successful completion *p_bo points to the created object.
*
* This function allocates a ttm_buffer_object, and then calls ttm_bo_init
@@ -577,7 +570,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
enum ttm_bo_type type, struct ttm_placement *placement,
uint32_t page_alignment, bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo);
/**
@@ -717,6 +709,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
/**
* ttm_bo_io
*


@@ -42,114 +42,10 @@
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#define TTM_MAX_BO_PRIORITY 4U
struct ttm_backend_func {
/**
* struct ttm_backend_func member bind
*
* @ttm: Pointer to a struct ttm_tt.
* @bo_mem: Pointer to a struct ttm_mem_reg describing the
* memory type and location for binding.
*
* Bind the backend pages into the aperture in the location
* indicated by @bo_mem. This function should be able to handle
* differences between aperture and system page sizes.
*/
int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
* struct ttm_backend_func member unbind
*
* @ttm: Pointer to a struct ttm_tt.
*
* Unbind previously bound backend pages. This function should be
* able to handle differences between aperture and system page sizes.
*/
int (*unbind) (struct ttm_tt *ttm);
/**
* struct ttm_backend_func member destroy
*
* @ttm: Pointer to a struct ttm_tt.
*
* Destroy the backend. This will be call back from ttm_tt_destroy so
* don't call ttm_tt_destroy from the callback or infinite loop.
*/
void (*destroy) (struct ttm_tt *ttm);
};
#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
#define TTM_PAGE_FLAG_SG (1 << 8)
#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
enum ttm_caching_state {
tt_uncached,
tt_wc,
tt_cached
};
/**
* struct ttm_tt
*
* @bdev: Pointer to a struct ttm_bo_device.
* @func: Pointer to a struct ttm_backend_func that describes
* the backend methods.
* @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
* pointer.
* @pages: Array of pages backing the data.
* @num_pages: Number of pages in the page array.
* @bdev: Pointer to the current struct ttm_bo_device.
* @be: Pointer to the ttm backend.
* @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
struct ttm_bo_device *bdev;
struct ttm_backend_func *func;
struct page *dummy_read_page;
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
struct sg_table *sg; /* for SG objects via dma-buf */
struct ttm_bo_global *glob;
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
tt_bound,
tt_unbound,
tt_unpopulated,
} state;
};
/**
* struct ttm_dma_tt
*
* @ttm: Base ttm_tt struct.
* @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_dma_tt {
struct ttm_tt ttm;
dma_addr_t *dma_address;
struct list_head pages_list;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
@@ -329,20 +225,16 @@ struct ttm_bo_driver {
/**
* ttm_tt_create
*
* @bdev: pointer to a struct ttm_bo_device:
* @size: Size of the data needed backing.
* @bo: The buffer object to create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
* @dummy_read_page: See struct ttm_bo_device.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
unsigned long size,
uint32_t page_flags,
struct page *dummy_read_page);
struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
uint32_t page_flags);
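
A hedged sketch of a driver implementing the slimmed-down hook together with
the relocated ttm_tt_init() (names are illustrative; a real backend would
also install its ttm_backend_func before the tt is used):

	#include <linux/slab.h>
	#include <drm/ttm/ttm_tt.h>

	static struct ttm_tt *example_ttm_tt_create(struct ttm_buffer_object *bo,
						    uint32_t page_flags)
	{
		struct ttm_tt *tt;

		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
		if (!tt)
			return NULL;

		if (ttm_tt_init(tt, bo, page_flags)) {
			kfree(tt);
			return NULL;
		}

		return tt;
	}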
/**
* ttm_tt_populate
@@ -615,109 +507,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
return *old;
}
/**
* ttm_tt_init
*
* @ttm: The struct ttm_tt.
* @bdev: pointer to a struct ttm_bo_device:
* @size: Size of the data needed backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
* @dummy_read_page: See struct ttm_bo_device.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
/**
* ttm_tt_fini
*
* @ttm: the ttm_tt structure.
*
* Free memory of ttm_tt structure
*/
void ttm_tt_fini(struct ttm_tt *ttm);
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
* ttm_ttm_bind:
*
* @ttm: The struct ttm_tt containing backing pages.
* @bo_mem: The struct ttm_mem_reg identifying the binding location.
*
* Bind the pages of @ttm to an aperture location identified by @bo_mem
*/
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
struct ttm_operation_ctx *ctx);
/**
* ttm_ttm_destroy:
*
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
void ttm_tt_destroy(struct ttm_tt *ttm);
/**
* ttm_ttm_unbind:
*
* @ttm: The struct ttm_tt.
*
* Unbind a struct ttm_tt.
*/
void ttm_tt_unbind(struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
*
* Swap in a previously swap out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
/**
* ttm_tt_set_placement_caching:
*
* @ttm A struct ttm_tt the backing pages of which will change caching policy.
* @placement: Flag indicating the desired caching policy.
*
* This function will change caching policy of any default kernel mappings of
* the pages backing @ttm. If changing from cached to uncached or
* write-combined,
* all CPU caches will first be flushed to make sure the data of the pages
* hit RAM. This function may be very costly as it involves global TLB
* and cache flushes and potential page splitting / combining.
*/
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
/**
* ttm_tt_populate - allocate pages for a ttm
*
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to allocate pages for a ttm
*/
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
*
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to free all pages from a ttm
*/
void ttm_tt_unpopulate(struct ttm_tt *ttm);
/*
* ttm_bo.c
*/
@@ -956,9 +745,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->glob->lru_lock);
spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bo->glob->lru_lock);
spin_unlock(&bo->bdev->glob->lru_lock);
}
reservation_object_unlock(bo->resv);
}
@@ -1058,6 +847,15 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
* ttm_bo_pipeline_gutting.
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Pipelined gutting a BO of it's backing store.
*/
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
/**
* ttm_io_prot
*
@@ -1071,29 +869,4 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
/**
* ttm_agp_tt_create
*
* @bdev: Pointer to a struct ttm_bo_device.
* @bridge: The agp bridge this device is sitting on.
* @size: Size of the data needed backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
* @dummy_read_page: See struct ttm_bo_device.
*
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
* for TT memory. This function uses the linux agpgart interface to
* bind and unbind memory backing a ttm_tt.
*/
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
struct agp_bridge_data *bridge,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif
#endif


@@ -49,6 +49,8 @@
* @work: The workqueue callback for the shrink queue.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @lower_mem_limit: include lower limit of swap space and lower limit of
* system memory.
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
@@ -67,6 +69,7 @@ struct ttm_mem_global {
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
uint64_t lower_mem_limit;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
@@ -90,4 +93,6 @@ extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page, uint64_t size);
extern size_t ttm_round_pot(size_t size);
extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
uint64_t num_pages, struct ttm_operation_ctx *ctx);
#endif

include/drm/ttm/ttm_tt.h (new file, 272 lines)

@@ -0,0 +1,272 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _TTM_TT_H_
#define _TTM_TT_H_
#include <linux/types.h>
struct ttm_tt;
struct ttm_mem_reg;
struct ttm_buffer_object;
struct ttm_operation_ctx;
#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
#define TTM_PAGE_FLAG_SG (1 << 8)
#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
enum ttm_caching_state {
tt_uncached,
tt_wc,
tt_cached
};
struct ttm_backend_func {
/**
* struct ttm_backend_func member bind
*
* @ttm: Pointer to a struct ttm_tt.
* @bo_mem: Pointer to a struct ttm_mem_reg describing the
* memory type and location for binding.
*
* Bind the backend pages into the aperture in the location
* indicated by @bo_mem. This function should be able to handle
* differences between aperture and system page sizes.
*/
int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
* struct ttm_backend_func member unbind
*
* @ttm: Pointer to a struct ttm_tt.
*
* Unbind previously bound backend pages. This function should be
* able to handle differences between aperture and system page sizes.
*/
int (*unbind) (struct ttm_tt *ttm);
/**
* struct ttm_backend_func member destroy
*
* @ttm: Pointer to a struct ttm_tt.
*
* Destroy the backend. This will be call back from ttm_tt_destroy so
* don't call ttm_tt_destroy from the callback or infinite loop.
*/
void (*destroy) (struct ttm_tt *ttm);
};
/**
* struct ttm_tt
*
* @bdev: Pointer to a struct ttm_bo_device.
* @func: Pointer to a struct ttm_backend_func that describes
* the backend methods.
* pointer.
* @pages: Array of pages backing the data.
* @num_pages: Number of pages in the page array.
* @bdev: Pointer to the current struct ttm_bo_device.
* @be: Pointer to the ttm backend.
* @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
struct ttm_bo_device *bdev;
struct ttm_backend_func *func;
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
struct sg_table *sg; /* for SG objects via dma-buf */
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
tt_bound,
tt_unbound,
tt_unpopulated,
} state;
};
/**
* struct ttm_dma_tt
*
* @ttm: Base ttm_tt struct.
* @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_dma_tt {
struct ttm_tt ttm;
dma_addr_t *dma_address;
struct list_head pages_list;
};
/**
* ttm_tt_create
*
* @bo: pointer to a struct ttm_buffer_object
* @zero_alloc: true if allocated pages needs to be zeroed
*
* Make sure we have a TTM structure allocated for the given BO.
* No pages are actually allocated.
*/
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
/**
* ttm_tt_init
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags);
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
uint32_t page_flags);
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
uint32_t page_flags);
/**
* ttm_tt_fini
*
* @ttm: the ttm_tt structure.
*
* Free memory of ttm_tt structure
*/
void ttm_tt_fini(struct ttm_tt *ttm);
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
* ttm_ttm_bind:
*
* @ttm: The struct ttm_tt containing backing pages.
* @bo_mem: The struct ttm_mem_reg identifying the binding location.
*
* Bind the pages of @ttm to an aperture location identified by @bo_mem
*/
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
struct ttm_operation_ctx *ctx);
/**
* ttm_ttm_destroy:
*
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
void ttm_tt_destroy(struct ttm_tt *ttm);
/**
* ttm_ttm_unbind:
*
* @ttm: The struct ttm_tt.
*
* Unbind a struct ttm_tt.
*/
void ttm_tt_unbind(struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
*
* Swap in a previously swap out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
/**
* ttm_tt_set_placement_caching:
*
* @ttm A struct ttm_tt the backing pages of which will change caching policy.
* @placement: Flag indicating the desired caching policy.
*
* This function will change caching policy of any default kernel mappings of
* the pages backing @ttm. If changing from cached to uncached or
* write-combined,
* all CPU caches will first be flushed to make sure the data of the pages
* hit RAM. This function may be very costly as it involves global TLB
* and cache flushes and potential page splitting / combining.
*/
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
/**
* ttm_tt_populate - allocate pages for a ttm
*
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to allocate pages for a ttm
*/
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
*
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to free all pages from a ttm
*/
void ttm_tt_unpopulate(struct ttm_tt *ttm);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
/**
* ttm_agp_tt_create
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
*
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
* for TT memory. This function uses the linux agpgart interface to
* bind and unbind memory backing a ttm_tt.
*/
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
uint32_t page_flags);
int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif
#endif