Merge tag 'drm-misc-next-2016-12-30' of git://anongit.freedesktop.org/git/drm-misc into drm-intel-next-queued
Directly merge drm-misc into drm-intel since Dave is on vacation and we need the various drm-misc patches (fb format rework, drm mm fixes, selftest framework and others). Also pulled back -rc2 in first to resync with drm-intel-fixes and make sure I can reuse the exact rerere solutions from drm-tip for safety, and because I'm lazy. Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
@@ -634,6 +634,19 @@ struct drm_device {
	int switch_power_state;
};

/**
 * drm_drv_uses_atomic_modeset - check if the driver implements
 * atomic_commit()
 * @dev: DRM device
 *
 * This check is useful if drivers do not have DRIVER_ATOMIC set but
 * have atomic modesetting internally implemented.
 */
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
	return dev->mode_config.funcs->atomic_commit != NULL;
}

#include <drm/drm_irq.h>

#define DRM_SWITCH_POWER_ON 0
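
A minimal usage sketch of the new helper (not part of this diff; the function name is hypothetical): a suspend path can branch on it, since drivers at this point may implement atomic_commit() without advertising DRIVER_ATOMIC.

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical caller: snapshot the display state only when the driver
 * really implements atomic_commit(), even without DRIVER_ATOMIC set. */
static struct drm_atomic_state *example_suspend_display(struct drm_device *dev)
{
	if (!drm_drv_uses_atomic_modeset(dev))
		return NULL; /* pure legacy driver, nothing to snapshot */

	return drm_atomic_helper_suspend(dev);
}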
@@ -145,6 +145,7 @@ struct __drm_crtcs_state {
	struct drm_crtc_state *state;
	struct drm_crtc_commit *commit;
	s64 __user *out_fence_ptr;
	unsigned last_vblank_count;
};

struct __drm_connnectors_state {
@@ -369,12 +370,6 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);

void drm_state_dump(struct drm_device *dev, struct drm_printer *p);

#ifdef CONFIG_DEBUG_FS
struct drm_minor;
int drm_atomic_debugfs_init(struct drm_minor *minor);
int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
#endif

#define for_each_connector_in_state(__state, connector, connector_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->num_connector && \
@@ -424,5 +419,4 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
	       state->connectors_changed;
}


#endif /* DRM_ATOMIC_H_ */
@@ -48,9 +48,6 @@ int drm_atomic_helper_commit(struct drm_device *dev,
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
				      struct drm_atomic_state *state,
				      bool pre_swap);
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
					   struct drm_atomic_state *old_state,
					   struct drm_crtc *crtc);

void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
					struct drm_atomic_state *old_state);
@@ -33,10 +33,7 @@
 *
 * @refcount: Refcount for this master object.
 * @dev: Link back to the DRM device
 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
 * @unique_len: Length of unique field. Protected by drm_global_mutex.
 * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
 * @lock: DRI lock information.
 * @lock: DRI1 lock information.
 * @driver_priv: Pointer to driver-private information.
 *
 * Note that master structures are only relevant for the legacy/primary device
@@ -45,8 +42,20 @@
struct drm_master {
	struct kref refcount;
	struct drm_device *dev;
	/**
	 * @unique: Unique identifier: e.g. busid. Protected by struct
	 * &drm_device master_mutex.
	 */
	char *unique;
	/**
	 * @unique_len: Length of unique field. Protected by struct &drm_device
	 * master_mutex.
	 */
	int unique_len;
	/**
	 * @magic_map: Map of used authentication tokens. Protected by struct
	 * &drm_device master_mutex.
	 */
	struct idr magic_map;
	struct drm_lock_data lock;
	void *driver_priv;
@@ -201,8 +201,8 @@ struct drm_bridge {
int drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
void drm_bridge_detach(struct drm_bridge *bridge);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
		      struct drm_bridge *previous);

bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
			   const struct drm_display_mode *mode,
@@ -117,7 +117,7 @@ struct drm_display_info {

	/**
	 * @pixel_clock: Maximum pixel clock supported by the sink, in units of
	 * 100Hz. This mismatches the clok in &drm_display_mode (which is in
	 * 100Hz. This mismatches the clock in &drm_display_mode (which is in
	 * kHZ), because that's what the EDID uses as base unit.
	 */
	unsigned int pixel_clock;
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
	 * core drm connector interfaces. Everything added from this callback
	 * should be unregistered in the early_unregister callback.
	 *
	 * This is called while holding drm_connector->mutex.
	 *
	 * Returns:
	 *
	 * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
	 * late_register(). It is called from drm_connector_unregister(),
	 * early in the driver unload sequence to disable userspace access
	 * before data structures are torn down.
	 *
	 * This is called while holding drm_connector->mutex.
	 */
	void (*early_unregister)(struct drm_connector *connector);

@@ -559,10 +563,6 @@ struct drm_cmdline_mode {
 * @interlace_allowed: can this connector handle interlaced modes?
 * @doublescan_allowed: can this connector handle doublescan?
 * @stereo_allowed: can this connector handle stereo modes?
 * @registered: is this connector exposed (registered) with userspace?
 * @modes: modes available on this connector (from fill_modes() + user)
 * @status: one of the drm_connector_status enums (connected, not, or unknown)
 * @probed_modes: list of modes derived directly from the display
 * @funcs: connector control functions
 * @edid_blob_ptr: DRM property containing EDID if present
 * @properties: property tracking for this connector
@@ -607,6 +607,13 @@ struct drm_connector {

	char *name;

	/**
	 * @mutex: Lock for general connector state, but currently only protects
	 * @registered. Most of the connector state is still protected by the
	 * mutex in &drm_mode_config.
	 */
	struct mutex mutex;

	/**
	 * @index: Compacted connector index, which matches the position inside
	 * the mode_config.list for drivers not supporting hot-add/removing. Can
@@ -620,12 +627,32 @@ struct drm_connector {
	bool interlace_allowed;
	bool doublescan_allowed;
	bool stereo_allowed;
	/**
	 * @registered: Is this connector exposed (registered) with userspace?
	 * Protected by @mutex.
	 */
	bool registered;

	/**
	 * @modes:
	 * Modes available on this connector (from fill_modes() + user).
	 * Protected by dev->mode_config.mutex.
	 */
	struct list_head modes; /* list of modes on this connector */

	/**
	 * @status:
	 * One of the drm_connector_status enums (connected, not, or unknown).
	 * Protected by dev->mode_config.mutex.
	 */
	enum drm_connector_status status;

	/* these are modes added by probing with DDC or the BIOS */
	/**
	 * @probed_modes:
	 * These are modes added by probing with DDC or the BIOS, before
	 * filtering is applied. Used by the probe helpers. Protected by
	 * dev->mode_config.mutex.
	 */
	struct list_head probed_modes;

	/**
@@ -634,6 +661,8 @@ struct drm_connector {
	 * flat panels in embedded systems, the driver should initialize the
	 * display_info.width_mm and display_info.height_mm fields with the
	 * physical size of the display.
	 *
	 * Protected by dev->mode_config.mutex.
	 */
	struct drm_display_info display_info;
	const struct drm_connector_funcs *funcs;
@@ -839,6 +868,11 @@ void drm_mode_put_tile_group(struct drm_device *dev,
 * @dev: the DRM device
 *
 * Iterate over all connectors of @dev.
 *
 * WARNING:
 *
 * This iterator is not safe against hotadd/removal of connectors and is
 * deprecated. Use drm_for_each_connector_iter() instead.
 */
#define drm_for_each_connector(connector, dev) \
	for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
@@ -847,4 +881,37 @@ void drm_mode_put_tile_group(struct drm_device *dev,
	     &connector->head != (&(dev)->mode_config.connector_list); \
	     connector = list_next_entry(connector, head))

/**
 * struct drm_connector_list_iter - connector_list iterator
 *
 * This iterator tracks state needed to be able to walk the connector_list
 * within struct drm_mode_config. Only use together with
 * drm_connector_list_iter_get(), drm_connector_list_iter_put() and
 * drm_connector_list_iter_next(), or with the convenience macro
 * drm_for_each_connector_iter().
 */
struct drm_connector_list_iter {
/* private: */
	struct drm_device *dev;
	struct drm_connector *conn;
};

void drm_connector_list_iter_get(struct drm_device *dev,
				 struct drm_connector_list_iter *iter);
struct drm_connector *
drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
void drm_connector_list_iter_put(struct drm_connector_list_iter *iter);

/**
 * drm_for_each_connector_iter - connector_list iterator macro
 * @connector: struct &drm_connector pointer used as cursor
 * @iter: struct &drm_connector_list_iter
 *
 * Note that @connector is only valid within the list body; if you want to use
 * @connector after calling drm_connector_list_iter_put() then you need to grab
 * your own reference first using drm_connector_reference().
 */
#define drm_for_each_connector_iter(connector, iter) \
	while ((connector = drm_connector_list_iter_next(iter)))

#endif
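
A sketch of how the new iterator API fits together (hypothetical function, not from this diff); the get/put calls bracket the walk so connectors can be added or removed concurrently:

#include <drm/drm_connector.h>

/* Hypothetical example: count connectors without holding mode_config.mutex. */
static unsigned int example_count_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector;
	unsigned int count = 0;

	drm_connector_list_iter_get(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		count++; /* connector is only valid inside the loop body */
	drm_connector_list_iter_put(&iter);

	return count;
}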
@@ -39,7 +39,6 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_property.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -68,14 +67,12 @@ static inline uint64_t I642U64(int64_t val)
}

struct drm_crtc;
struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
struct drm_bridge;
struct drm_atomic_state;

struct drm_crtc_helper_funcs;
struct drm_encoder_helper_funcs;
struct drm_plane_helper_funcs;

/**
@@ -93,8 +90,6 @@ struct drm_plane_helper_funcs;
 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
 * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
 * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
 * @last_vblank_count: for helpers and drivers to capture the vblank of the
 *	update to ensure framebuffer cleanup isn't done too early
 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
 * @mode: current mode timings
 * @mode_blob: &drm_property_blob for @mode
@@ -140,9 +135,6 @@ struct drm_crtc_state {
	u32 connector_mask;
	u32 encoder_mask;

	/* last_vblank_count: for vblank waits before cleanup */
	u32 last_vblank_count;

	/* adjusted_mode: for use by helpers and drivers */
	struct drm_display_mode adjusted_mode;

@@ -25,8 +25,12 @@

#include <linux/list.h>
#include <linux/ctype.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
#include <drm/drm_mode_object.h>

struct drm_encoder;

/**
 * struct drm_encoder_funcs - encoder controls
 *
@@ -188,9 +192,6 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
	return encoder->index;
}

/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);

/**
 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
 * @encoder: encoder to test
@@ -29,6 +29,7 @@

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>

/**
 * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
@@ -121,6 +121,10 @@ struct drm_framebuffer {
	 * @base: base modeset object structure, contains the reference count.
	 */
	struct drm_mode_object base;
	/**
	 * @format: framebuffer format information
	 */
	const struct drm_format_info *format;
	/**
	 * @funcs: framebuffer vfunc table
	 */
@@ -165,28 +169,11 @@ struct drm_framebuffer {
	 * pixels.
	 */
	unsigned int height;
	/**
	 * @depth: Depth in bits per pixel for RGB formats. 0 for everything
	 * else. Legacy information derived from @pixel_format, it's suggested to use
	 * the DRM FOURCC codes and helper functions directly instead.
	 */
	unsigned int depth;
	/**
	 * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for
	 * everything else. Legacy information derived from @pixel_format, it's
	 * suggested to use the DRM FOURCC codes and helper functions directly
	 * instead.
	 */
	int bits_per_pixel;
	/**
	 * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or
	 * DRM_MODE_FB_MODIFIERS.
	 */
	int flags;
	/**
	 * @pixel_format: DRM FOURCC code describing the pixel format.
	 */
	uint32_t pixel_format; /* fourcc format */
	/**
	 * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
@@ -282,4 +269,10 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
			     struct drm_framebuffer, head); \
	     &fb->head != (&(dev)->mode_config.fb_list); \
	     fb = list_next_entry(fb, head))

int drm_framebuffer_plane_width(int width,
				const struct drm_framebuffer *fb, int plane);
int drm_framebuffer_plane_height(int height,
				 const struct drm_framebuffer *fb, int plane);

#endif
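
With @depth, @bits_per_pixel and @pixel_format folded into the new @format pointer, per-format metadata now comes from the drm_format_info table. A hedged sketch (the helper name is made up; the cpp[] field is as in drm_fourcc.h of this period):

#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>

/* Hypothetical helper: bytes per pixel of one plane, read from the
 * format info instead of the removed bits_per_pixel field. */
static unsigned int example_fb_plane_cpp(const struct drm_framebuffer *fb,
					 int plane)
{
	return fb->format->cpp[plane];
}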
@@ -1,6 +1,7 @@
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -48,6 +49,12 @@
#include <linux/stackdepot.h>
#endif

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT = 0,
	DRM_MM_SEARCH_BEST = 1 << 0,
@@ -67,11 +74,8 @@ struct drm_mm_node {
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	bool scanned_block : 1;
	unsigned long color;
	u64 start;
	u64 size;
@@ -91,32 +95,44 @@ struct drm_mm {
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_color;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	unsigned scanned_blocks;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	unsigned long scan_active;
};

struct drm_mm_scan {
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	unsigned int flags;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helpers for proper encapusulation of drm_mm
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}
@@ -125,18 +141,37 @@ static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helpers for proper encapusulation of drm_mm
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(struct drm_mm *mm)
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function.
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_follows;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}
@@ -145,20 +180,20 @@ static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-sepific debug dumpers. Otherwise drivers should not
 * inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows()
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}
@@ -167,18 +202,32 @@ static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-sepific debug dumpers. Otherwise drivers should not
 * inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
@@ -187,9 +236,20 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each, so not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe, so safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
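
A sketch of the iterators above (hypothetical function, not from this diff): the plain variant for read-only walks, the _safe variant whenever nodes may be removed mid-walk.

#include <drm/drm_mm.h>

/* Hypothetical example: sum up all allocated space in the manager. */
static u64 example_total_allocated(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	u64 total = 0;

	drm_mm_for_each_node(node, mm)
		total += node->size;

	return total;
}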
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
@@ -225,49 +285,16 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}

int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					unsigned alignment,
					u64 alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
@@ -289,7 +316,7 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      unsigned alignment,
					      u64 alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
@@ -299,16 +326,81 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					  DRM_MM_CREATE_DEFAULT);
}

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_search_flags sflags,
			   enum drm_mm_allocator_flags aflags)
{
	return drm_mm_insert_node_in_range_generic(mm, node,
						   size, alignment, 0,
						   0, U64_MAX,
						   sflags, aflags);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     u64 alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node,
					  size, alignment, 0,
					  flags, DRM_MM_CREATE_DEFAULT);
}
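
Typical call pattern for the inline wrappers above (hypothetical function; note that the kerneldoc requires the node to be zeroed first):

#include <linux/string.h>
#include <drm/drm_mm.h>

/* Hypothetical example: allocate @size bytes anywhere in the manager. */
static int example_alloc(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
{
	memset(node, 0, sizeof(*node)); /* preallocated node must be cleared */

	return drm_mm_insert_node(mm, node, size, 0 /* no alignment */,
				  DRM_MM_SEARCH_DEFAULT);
}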
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
		 u64 start,
		 u64 size);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
@@ -329,22 +421,50 @@ __drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
	     node__ && node__->start < (end__); \
	     node__ = list_next_entry(node__, node_list))

void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 unsigned int flags);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    unsigned int flags)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX,
				    flags);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);

void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif
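
The scan API above serves the classic eviction-roster pattern. A hedged sketch, not from this diff: struct example_obj, the LRU list and the flags value 0 are assumptions. Every block added to the scan must be removed again before the drm_mm is touched, and blocks for which remove returns true form the eviction set.

#include <linux/list.h>
#include <drm/drm_mm.h>

struct example_obj {
	struct drm_mm_node node;
	struct list_head lru_link;
	struct list_head scan_link;
};

/* Hypothetical eviction scan: walk an LRU until a hole of @size bytes
 * can be formed; returns the number of objects the caller would have
 * to evict (0 if no suitable hole was found). */
static unsigned int example_scan_for_hole(struct drm_mm *mm,
					  struct list_head *lru,
					  u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct example_obj *obj, *next;
	unsigned int evictable = 0;
	LIST_HEAD(scanned);

	drm_mm_scan_init(&scan, mm, size, alignment, 0, 0 /* flags */);

	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scanned);
		if (drm_mm_scan_add_block(&scan, &obj->node))
			break; /* a suitable hole can now be formed */
	}

	/* Every scanned block must be removed again, in any order; the
	 * blocks where remove returns true overlap the hole found above. */
	list_for_each_entry_safe(obj, next, &scanned, scan_link) {
		if (drm_mm_scan_remove_block(&scan, &obj->node))
			evictable++;
		list_del(&obj->scan_link);
	}

	return evictable;
}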
@@ -365,7 +365,13 @@ struct drm_mode_config {
	struct list_head fb_list;

	/**
	 * @num_connector: Number of connectors on this device.
	 * @connector_list_lock: Protects @num_connector and
	 * @connector_list.
	 */
	spinlock_t connector_list_lock;
	/**
	 * @num_connector: Number of connectors on this device. Protected by
	 * @connector_list_lock.
	 */
	int num_connector;
	/**
@@ -373,7 +379,9 @@ struct drm_mode_config {
	 */
	struct ida connector_ida;
	/**
	 * @connector_list: List of connector objects.
	 * @connector_list: List of connector objects. Protected by
	 * @connector_list_lock. Only use drm_for_each_connector_iter() and
	 * struct &drm_connector_list_iter to walk this list.
	 */
	struct list_head connector_list;
	int num_encoder;
@@ -27,7 +27,8 @@

void drm_helper_move_panel_connectors_to_head(struct drm_device *);

void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
				    struct drm_framebuffer *fb,
				    const struct drm_mode_fb_cmd2 *mode_cmd);

int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
@@ -30,6 +30,7 @@
#define __DRM_MODESET_HELPER_VTABLES_H__

#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>

/**
 * DOC: overview
@@ -114,8 +114,6 @@ struct drm_simple_display_pipe {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
					  struct drm_bridge *bridge);

void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe);

int drm_simple_display_pipe_init(struct drm_device *dev,
				 struct drm_simple_display_pipe *pipe,
				 const struct drm_simple_display_pipe_funcs *funcs,
@@ -39,23 +39,6 @@ struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 *	    given buffer. It might return -EBUSY to signal that backing storage
 *	    is already allocated and incompatible with the requirements
 *	    of requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns list of scatter pages allocated, increases usecount
 *		 of the buffer. Requires at least one attach to be called
 *		 before. Returned sg list should already be mapped into
 *		 _device_ address space. This call may sleep. May also return
 *		 -EINTR. Should return -EINVAL if attach hasn't been called yet.
 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
 *		   pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 *		      caches and allocate backing storage (if not yet done)
 *		      respectively pin the object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address
 *		 space, users may not block until the subsequent unmap call.
 *		 This callback must not sleep.
@@ -63,43 +46,206 @@ struct dma_buf_attachment;
 *	    This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 * @mmap: used to expose the backing storage to userspace. Note that the
 *	  mapping needs to be coherent - if the exporter doesn't directly
 *	  support this, it needs to fake coherency by shooting down any ptes
 *	  when transitioning away from the cpu domain.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &device can access the provided &dma_buf. Exporters which support
	 * buffer objects in special locations like VRAM or device-specific
	 * carveout areas should check whether the buffer could be moved to
	 * system memory (or directly accessed by the provided device), and
	 * otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the priv
	 * pointer of &dma_buf_attachment structure.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of requesting device.
	 */
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully. This
	 * essentially pins the DMA buffer into place, and it cannot be moved
	 * any more.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * device->dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with -EBUSY
	 * if the call would block.
	 */

	/* after final dma_buf_put() */
	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet, this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the CPU. The exporter can use this to flush caches and
	 * unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent, userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 * Drivers do not need to check this themselves.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
@@ -124,6 +270,15 @@ struct dma_buf_ops {
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created by
 * calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate struct &dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
@@ -160,6 +315,11 @@ struct dma_buf {
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
@@ -192,9 +352,11 @@ struct dma_buf_export_info {
};

/**
 * helper macro for exporters; zeros and fills in most common values
 *
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the struct &dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
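
A sketch of an exporter using the macro documented above (the ops, size and priv arguments are hypothetical; dma_buf_export() is the standard entry point of this period):

#include <linux/dma-buf.h>
#include <linux/fs.h>

/* Hypothetical exporter: wrap driver-private backing storage in a dma_buf. */
static struct dma_buf *example_export(const struct dma_buf_ops *ops,
				      size_t size, void *priv)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info); /* zeroed, exp_name prefilled */

	exp_info.ops = ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = priv;

	return dma_buf_export(&exp_info);
}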
@@ -133,6 +133,6 @@ static inline int kref_put_mutex(struct kref *kref,
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
	return atomic_inc_not_zero(&kref->refcount);
}
#endif /* _KREF_H_ */
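
The helper is typically used in the lookup-under-lock pattern; a sketch with hypothetical names, not from this diff:

#include <linux/kref.h>

struct example_obj {
	struct kref ref;
};

/* Hypothetical lookup step: take a reference only if the object is not
 * already on its way to being freed; the caller holds whatever lock
 * keeps @obj from being freed during this call. */
static struct example_obj *example_get(struct example_obj *obj)
{
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL; /* lost the race against the last kref_put() */

	return obj;
}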
include/linux/prime_numbers.h (new file, 37 lines)
@@ -0,0 +1,37 @@
#ifndef __LINUX_PRIME_NUMBERS_H
#define __LINUX_PRIME_NUMBERS_H

#include <linux/types.h>

bool is_prime_number(unsigned long x);
unsigned long next_prime_number(unsigned long x);

/**
 * for_each_prime_number - iterate over each prime up to a value
 * @prime: the current prime number in this iteration
 * @max: the upper limit
 *
 * Starting from the first prime number 2 iterate over each prime number up to
 * the @max value. On each iteration, @prime is set to the current prime number.
 * @max should be less than ULONG_MAX to ensure termination. To begin with
 * @prime set to 1 on the first iteration use for_each_prime_number_from()
 * instead.
 */
#define for_each_prime_number(prime, max) \
	for_each_prime_number_from((prime), 2, (max))

/**
 * for_each_prime_number_from - iterate over each prime up to a value
 * @prime: the current prime number in this iteration
 * @from: the initial value
 * @max: the upper limit
 *
 * Starting from @from iterate over each successive prime number up to the
 * @max value. On each iteration, @prime is set to the current prime number.
 * @max should be less than ULONG_MAX, and @from less than @max, to ensure
 * termination.
 */
#define for_each_prime_number_from(prime, from, max) \
	for (prime = (from); prime <= (max); prime = next_prime_number(prime))

#endif /* !__LINUX_PRIME_NUMBERS_H */
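
A usage sketch of the new header (hypothetical function, not from this diff):

#include <linux/kernel.h>
#include <linux/prime_numbers.h>

/* Hypothetical example: print every prime up to @max, e.g. for a selftest. */
static void example_print_primes(unsigned long max)
{
	unsigned long prime;

	for_each_prime_number(prime, max)
		pr_info("prime: %lu\n", prime);
}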
@@ -144,6 +144,40 @@ reservation_object_get_list(struct reservation_object *obj)
					 reservation_object_held(obj));
}

/**
 * reservation_object_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 */
static inline int
reservation_object_lock(struct reservation_object *obj,
			struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}

/**
 * reservation_object_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void
reservation_object_unlock(struct reservation_object *obj)
{
	ww_mutex_unlock(&obj->lock);
}

/**
 * reservation_object_get_excl - get the reservation object's
 * exclusive fence, with update-side lock held
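
A sketch of the single-object case the documentation above permits, passing NULL as @ctx (the fence-update step is left as a comment and is hypothetical):

#include <linux/reservation.h>

/* Hypothetical example: exclusive update of a single reservation object. */
static int example_update(struct reservation_object *resv)
{
	int ret;

	ret = reservation_object_lock(resv, NULL); /* no acquire context */
	if (ret)
		return ret;

	/* ... modify the fences here, e.g. add a new exclusive fence ... */

	reservation_object_unlock(resv);

	return 0;
}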
@@ -154,6 +154,7 @@ extern "C" {

/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE           0
#define DRM_FORMAT_MOD_VENDOR_NONE    0
#define DRM_FORMAT_MOD_VENDOR_INTEL   0x01
#define DRM_FORMAT_MOD_VENDOR_AMD     0x02
#define DRM_FORMAT_MOD_VENDOR_NV      0x03
@@ -172,6 +173,16 @@ extern "C" {
 * authoritative source for all of these.
 */

/*
 * Linear Layout
 *
 * Just plain linear layout. Note that this is different from not specifying any
 * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl),
 * which tells the driver to also take driver-internal information into account
 * and so might actually result in a tiled framebuffer.
 */
#define DRM_FORMAT_MOD_LINEAR	fourcc_mod_code(NONE, 0)

/* Intel framebuffer modifiers */

/*
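
For illustration (not part of this diff): a modifier token packs an 8-bit vendor id into the top byte and a 56-bit vendor-defined value below it, so the new LINEAR token is simply vendor NONE with value 0.

#include <linux/bug.h>
#include <drm/drm_fourcc.h>

/* Hypothetical compile-time check: DRM_FORMAT_MOD_LINEAR expands to
 * fourcc_mod_code(NONE, 0), i.e. (0ULL << 56) | 0 == 0. */
static inline void example_check_linear_modifier(void)
{
	BUILD_BUG_ON(DRM_FORMAT_MOD_LINEAR != 0);
}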