Merge drm/drm-next into drm-intel-next-queued

We need a backmerge to get DP_DPCD_REV_14 before we push other
i915 changes to dinq that could break compilation.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Committed by Rodrigo Vivi on 2018-07-23 09:13:12 -07:00
13380 changed files with 538236 additions and 634893 deletions


@@ -72,12 +72,41 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
/**
* DOC: memory domains
*
* %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
* Memory in this pool could be swapped out to disk if there is pressure.
*
* %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
* GPU's virtual address space via gart. Gart memory linearizes non-contiguous
* pages of system memory, allowing the GPU to access system memory in a
* linearized fashion.
*
* %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
* carved out by the BIOS.
*
* %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
* across shader threads.
*
* %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
* execution of all the waves on a device.
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA)
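
A minimal allocation sketch (hypothetical helper; assumes the full struct drm_amdgpu_gem_create_in/out layout from this header and kernel UAPI headers on the include path) that picks the GTT domain described above:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Hedged sketch: allocate a GPU-accessible system-memory (GTT) BO. */
static int alloc_gtt_bo(int fd, uint64_t size, uint32_t *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = size;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;  /* see domain doc above */
	args.in.domain_flags = 0;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;
	*handle = args.out.handle;  /* GEM handle for later ioctls */
	return 0;
}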
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -95,6 +124,10 @@ extern "C" {
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
* for the second page onward should be set to NC.
*/
#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -473,7 +506,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_NUM 8
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -482,6 +516,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -520,6 +555,10 @@ union drm_amdgpu_cs {
/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
/* The IB fence should do the L2 writeback but not invalidate any shader
* caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -620,6 +659,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_ASD 0x0d
/* Subquery id: Query VCN firmware version */
#define AMDGPU_INFO_FW_VCN 0x0e
/* Subquery id: Query GFX RLC SRLC firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
/* Subquery id: Query GFX RLC SRLG firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
/* Subquery id: Query GFX RLC SRLS firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */


@@ -680,6 +680,22 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ATOMIC 3
/**
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
/**
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors used to write
* the scene composed in the commit back to memory. Depends on the client
* also supporting DRM_CLIENT_CAP_ATOMIC
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
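
A minimal opt-in sketch (hypothetical helper; assumes struct drm_set_client_cap also carries a __u64 value field, as in drm.h), enabling ATOMIC first since WRITEBACK_CONNECTORS depends on it:

#include <sys/ioctl.h>
#include <drm/drm.h>

static int enable_writeback_connectors(int fd)
{
	struct drm_set_client_cap cap;

	cap.capability = DRM_CLIENT_CAP_ATOMIC;  /* required by writeback */
	cap.value = 1;
	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
		return -1;

	cap.capability = DRM_CLIENT_CAP_WRITEBACK_CONNECTORS;
	return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}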


@@ -183,6 +183,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
@@ -384,6 +385,23 @@ extern "C" {
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
fourcc_mod_code(NVIDIA, 0x15)
/*
* Some Broadcom modifiers take parameters, for example the number of
* vertical lines in the image. The lower 8 bits are reserved for the
* modifier type, the next 48 bits carry parameters, and the top 8 bits
* are the vendor code.
*/
#define __fourcc_mod_broadcom_param_shift 8
#define __fourcc_mod_broadcom_param_bits 48
#define fourcc_mod_broadcom_code(val, params) \
fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
#define fourcc_mod_broadcom_param(m) \
((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
#define fourcc_mod_broadcom_mod(m) \
((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
__fourcc_mod_broadcom_param_shift))
/*
* Broadcom VC4 "T" format
*
@@ -405,6 +423,151 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
/*
* Broadcom SAND format
*
* This is the native format that the H.264 codec block uses. For VC4
* HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
*
* The image can be considered to be split into columns, and the
* columns are placed consecutively into memory. The width of those
* columns can be either 32, 64, 128, or 256 pixels, but in practice
* only 128 pixel columns are used.
*
* The pitch between the start of each column is set to optimally
* switch between SDRAM banks. This is passed as the number of lines
* of column width in the modifier (we can't use the stride value due
* to various core checks that look at it, so you should set the
* stride to width*cpp).
*
* Note that the column height for this format modifier is the same
* for all of the planes, assuming that each column contains both Y
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(2, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(3, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(4, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(5, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
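
Since the column pitch travels in the modifier rather than the stride, a quick sketch of composing a SAND128 modifier for a 96-line column height and decoding it again (the value 96 is only an illustration):

#include <stdio.h>
#include <drm/drm_fourcc.h>

int main(void)
{
	unsigned long long mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

	/* Parameter and base modifier round-trip through the helper macros. */
	printf("column height: %d lines\n", fourcc_mod_broadcom_param(mod));
	printf("is SAND128: %d\n",
	       fourcc_mod_broadcom_mod(mod) == DRM_FORMAT_MOD_BROADCOM_SAND128);
	return 0;
}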
/* Broadcom UIF format
*
* This is the common format for the current Broadcom multimedia
* blocks, including V3D 3.x and newer, newer video codecs, and
* displays.
*
* The image consists of utiles (64-byte blocks), UIF blocks (2x2 utiles),
* and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
* stored in columns, with padding between the columns to ensure that
* moving from one column to the next doesn't hit the same SDRAM page
* bank.
*
* To calculate the padding, it is assumed that each hardware block
* and the software driving it knows the platform's SDRAM page size,
* number of banks, and XOR address, and that it's identical between
* all blocks using the format. This tiling modifier will use XOR as
* necessary to reduce the padding. If a hardware block can't do XOR,
* the assumption is that a no-XOR tiling modifier will be created.
*/
#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
/*
* Arm Framebuffer Compression (AFBC) modifiers
*
* AFBC is a proprietary lossless image compression protocol and format.
* It provides fine-grained random access and minimizes the amount of data
* transferred between IP blocks.
*
* AFBC has several features which may be supported and/or used, which are
* represented using bits in the modifier. Not all combinations are valid,
* and different devices or use-cases may support different combinations.
*/
#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
/*
* AFBC superblock size
*
* Indicates the superblock size(s) used for the AFBC buffer. The buffer
* size (in pixels) must be aligned to a multiple of the superblock size.
* The four least significant bits (LSBs) are reserved for the block size.
*/
#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
/*
* AFBC lossless colorspace transform
*
* Indicates that the buffer makes use of the AFBC lossless colorspace
* transform.
*/
#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
/*
* AFBC block-split
*
* Indicates that the payload of each superblock is split. The second
* half of the payload is positioned at a predefined offset from the start
* of the superblock payload.
*/
#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
/*
* AFBC sparse layout
*
* This flag indicates that the payload of each superblock must be stored at a
* predefined position relative to the other superblocks in the same AFBC
* buffer. This order is the same order used by the header buffer. In this mode
* each superblock is given the same amount of space as an uncompressed
* superblock of the particular format would require, rounding up to the next
* multiple of 128 bytes in size.
*/
#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
/*
* AFBC copy-block restrict
*
* Buffers with this flag must obey the copy-block restriction. The restriction
* is such that there are no copy-blocks referring across the border of 8x8
* blocks. For the subsampled data the 8x8 limitation is also subsampled.
*/
#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
/*
* AFBC tiled layout
*
* The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
* superblocks inside a tile are stored together in memory. 8x8 tiles are used
* for pixel formats up to and including 32 bpp while 4x4 tiles are used for
* larger bpp formats. The order between the tiles is scan line.
* When the tiled layout is used, the buffer size (in pixels) must be aligned
* to the tile size.
*/
#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
/*
* AFBC solid color blocks
*
* Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
* can be reduced if a whole superblock is a single color.
*/
#define AFBC_FORMAT_MOD_SC (1ULL << 9)
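
As a composition sketch, features are OR-ed into the mode passed to DRM_FORMAT_MOD_ARM_AFBC(); whether a device accepts this particular combination is hardware-specific, so treat it as an illustration only:

#include <drm/drm_fourcc.h>

/* 16x16 superblocks with the lossless colorspace transform and the
 * sparse layout; an assumed, not universally valid, combination.
 */
static const unsigned long long afbc_mod =
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_SPARSE);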
#if defined(__cplusplus)
}
#endif


@@ -93,6 +93,15 @@ extern "C" {
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
#define DRM_MODE_PICTURE_ASPECT_64_27 3
#define DRM_MODE_PICTURE_ASPECT_256_135 4
/* Content type options */
#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
#define DRM_MODE_CONTENT_TYPE_PHOTO 2
#define DRM_MODE_CONTENT_TYPE_CINEMA 3
#define DRM_MODE_CONTENT_TYPE_GAME 4
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
@@ -102,6 +111,10 @@ extern "C" {
(DRM_MODE_PICTURE_ASPECT_4_3<<19)
#define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19)
#define DRM_MODE_FLAG_PIC_AR_64_27 \
(DRM_MODE_PICTURE_ASPECT_64_27<<19)
#define DRM_MODE_FLAG_PIC_AR_256_135 \
(DRM_MODE_PICTURE_ASPECT_256_135<<19)
#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
DRM_MODE_FLAG_NHSYNC | \
@@ -338,6 +351,7 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
struct drm_mode_get_connector {


@@ -135,6 +135,219 @@ struct drm_exynos_g2d_exec {
__u64 async;
};
/* Exynos DRM IPP v2 API */
/**
* Enumerate available IPP hardware modules.
*
* @count_ipps: size of ipp_id array / number of ipp modules (set by driver)
* @reserved: padding
* @ipp_id_ptr: pointer to ipp_id array or NULL
*/
struct drm_exynos_ioctl_ipp_get_res {
__u32 count_ipps;
__u32 reserved;
__u64 ipp_id_ptr;
};
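
Because @ipp_id_ptr may be NULL and @count_ipps is set by the driver, the usual two-pass query pattern applies; a hedged sketch (hypothetical helper, error handling trimmed):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

static uint32_t *query_ipp_ids(int fd, uint32_t *count)
{
	struct drm_exynos_ioctl_ipp_get_res res;
	uint32_t *ids;

	memset(&res, 0, sizeof(res));
	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res))
		return NULL;  /* first pass: count_ipps set by driver */

	ids = calloc(res.count_ipps, sizeof(*ids));
	if (!ids)
		return NULL;
	res.ipp_id_ptr = (uintptr_t)ids;
	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res)) {
		free(ids);
		return NULL;  /* second pass: array filled in */
	}
	*count = res.count_ipps;
	return ids;
}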
enum drm_exynos_ipp_format_type {
DRM_EXYNOS_IPP_FORMAT_SOURCE = 0x01,
DRM_EXYNOS_IPP_FORMAT_DESTINATION = 0x02,
};
struct drm_exynos_ipp_format {
__u32 fourcc;
__u32 type;
__u64 modifier;
};
enum drm_exynos_ipp_capability {
DRM_EXYNOS_IPP_CAP_CROP = 0x01,
DRM_EXYNOS_IPP_CAP_ROTATE = 0x02,
DRM_EXYNOS_IPP_CAP_SCALE = 0x04,
DRM_EXYNOS_IPP_CAP_CONVERT = 0x08,
};
/**
* Get IPP hardware capabilities and supported image formats.
*
* @ipp_id: id of IPP module to query
* @capabilities: bitmask of drm_exynos_ipp_capability (set by driver)
* @reserved: padding
* @formats_count: size of formats array (in entries) / number of filled
* formats (set by driver)
* @formats_ptr: pointer to formats array or NULL
*/
struct drm_exynos_ioctl_ipp_get_caps {
__u32 ipp_id;
__u32 capabilities;
__u32 reserved;
__u32 formats_count;
__u64 formats_ptr;
};
enum drm_exynos_ipp_limit_type {
/* size (horizontal/vertical) limits, in pixels (min, max, alignment) */
DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE = 0x0001,
/* scale ratio (horizontal/vertical), 16.16 fixed point (min, max) */
DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE = 0x0002,
/* image buffer area */
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER = 0x0001 << 16,
/* src/dst rectangle area */
DRM_EXYNOS_IPP_LIMIT_SIZE_AREA = 0x0002 << 16,
/* src/dst rectangle area when rotation enabled */
DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED = 0x0003 << 16,
DRM_EXYNOS_IPP_LIMIT_TYPE_MASK = 0x000f,
DRM_EXYNOS_IPP_LIMIT_SIZE_MASK = 0x000f << 16,
};
struct drm_exynos_ipp_limit_val {
__u32 min;
__u32 max;
__u32 align;
__u32 reserved;
};
/**
* IPP module limitation.
*
* @type: limit type (see drm_exynos_ipp_limit_type enum)
* @reserved: padding
* @h: horizontal limits
* @v: vertical limits
*/
struct drm_exynos_ipp_limit {
__u32 type;
__u32 reserved;
struct drm_exynos_ipp_limit_val h;
struct drm_exynos_ipp_limit_val v;
};
/**
* Get IPP limits for given image format.
*
* @ipp_id: id of IPP module to query
* @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h)
* @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h)
* @type: source/destination identifier (see enum drm_exynos_ipp_format_type)
* @limits_count: size of limits array (in entries) / number of filled entries
* (set by driver)
* @limits_ptr: pointer to limits array or NULL
*/
struct drm_exynos_ioctl_ipp_get_limits {
__u32 ipp_id;
__u32 fourcc;
__u64 modifier;
__u32 type;
__u32 limits_count;
__u64 limits_ptr;
};
enum drm_exynos_ipp_task_id {
/* buffer described by struct drm_exynos_ipp_task_buffer */
DRM_EXYNOS_IPP_TASK_BUFFER = 0x0001,
/* rectangle described by struct drm_exynos_ipp_task_rect */
DRM_EXYNOS_IPP_TASK_RECTANGLE = 0x0002,
/* transformation described by struct drm_exynos_ipp_task_transform */
DRM_EXYNOS_IPP_TASK_TRANSFORM = 0x0003,
/* alpha configuration described by struct drm_exynos_ipp_task_alpha */
DRM_EXYNOS_IPP_TASK_ALPHA = 0x0004,
/* source image data (for buffer and rectangle chunks) */
DRM_EXYNOS_IPP_TASK_TYPE_SOURCE = 0x0001 << 16,
/* destination image data (for buffer and rectangle chunks) */
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION = 0x0002 << 16,
};
/**
* Memory buffer with image data.
*
* @id: must be DRM_EXYNOS_IPP_TASK_BUFFER
* other parameters are the same as for the AddFB2 generic DRM ioctl
*/
struct drm_exynos_ipp_task_buffer {
__u32 id;
__u32 fourcc;
__u32 width, height;
__u32 gem_id[4];
__u32 offset[4];
__u32 pitch[4];
__u64 modifier;
};
/**
* Rectangle for processing.
*
* @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE
* @reserved: padding
* @x,@y: position of the top-left corner, in pixels
* @w,@h: width/height in pixels
*/
struct drm_exynos_ipp_task_rect {
__u32 id;
__u32 reserved;
__u32 x;
__u32 y;
__u32 w;
__u32 h;
};
/**
* Image transformation description.
*
* @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM
* @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values
*/
struct drm_exynos_ipp_task_transform {
__u32 id;
__u32 rotation;
};
/**
* Image global alpha configuration for formats without alpha values.
*
* @id: must be DRM_EXYNOS_IPP_TASK_ALPHA
* @value: global alpha value (0-255)
*/
struct drm_exynos_ipp_task_alpha {
__u32 id;
__u32 value;
};
enum drm_exynos_ipp_flag {
/* generate DRM event after processing */
DRM_EXYNOS_IPP_FLAG_EVENT = 0x01,
/* dry run, only check task parameters */
DRM_EXYNOS_IPP_FLAG_TEST_ONLY = 0x02,
/* non-blocking processing */
DRM_EXYNOS_IPP_FLAG_NONBLOCK = 0x04,
};
#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\
DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK)
/**
* Perform image processing described by array of drm_exynos_ipp_task_*
* structures (parameters array).
*
* @ipp_id: id of IPP module to run the task
* @flags: bitmask of drm_exynos_ipp_flag values
* @reserved: padding
* @params_size: size of parameters array (in bytes)
* @params_ptr: pointer to parameters array or NULL
* @user_data: (optional) data for drm event
*/
struct drm_exynos_ioctl_ipp_commit {
__u32 ipp_id;
__u32 flags;
__u32 reserved;
__u32 params_size;
__u64 params_ptr;
__u64 user_data;
};
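
The parameters array is a packed sequence of drm_exynos_ipp_task_* chunks, each tagged through its @id field with a task kind and, for buffer and rectangle chunks, a source/destination type bit. A hedged sketch of a TEST_ONLY dry run (hypothetical helper; a real task would normally also carry a destination buffer chunk):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

static int ipp_test_task(int fd, uint32_t ipp_id,
			 const struct drm_exynos_ipp_task_buffer *src,
			 const struct drm_exynos_ipp_task_rect *dst_rect)
{
	struct {
		struct drm_exynos_ipp_task_buffer src;
		struct drm_exynos_ipp_task_rect dst_rect;
	} params;
	struct drm_exynos_ioctl_ipp_commit commit;

	params.src = *src;
	params.src.id = DRM_EXYNOS_IPP_TASK_BUFFER |
			DRM_EXYNOS_IPP_TASK_TYPE_SOURCE;
	params.dst_rect = *dst_rect;
	params.dst_rect.id = DRM_EXYNOS_IPP_TASK_RECTANGLE |
			     DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION;

	memset(&commit, 0, sizeof(commit));
	commit.ipp_id = ipp_id;
	commit.flags = DRM_EXYNOS_IPP_FLAG_TEST_ONLY;  /* dry run only */
	commit.params_size = sizeof(params);
	commit.params_ptr = (uintptr_t)&params;
	return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_COMMIT, &commit);
}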
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP 0x01
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@@ -147,6 +360,11 @@ struct drm_exynos_g2d_exec {
#define DRM_EXYNOS_G2D_EXEC 0x22
/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
/* IPP - Image Post Processing */
#define DRM_EXYNOS_IPP_GET_RESOURCES 0x40
#define DRM_EXYNOS_IPP_GET_CAPS 0x41
#define DRM_EXYNOS_IPP_GET_LIMITS 0x42
#define DRM_EXYNOS_IPP_COMMIT 0x43
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -165,8 +383,20 @@ struct drm_exynos_g2d_exec {
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_GET_RESOURCES, \
struct drm_exynos_ioctl_ipp_get_res)
#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps)
#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_GET_LIMITS, \
struct drm_exynos_ioctl_ipp_get_limits)
#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
#define DRM_EXYNOS_IPP_EVENT 0x80000002
struct drm_exynos_g2d_event {
struct drm_event base;
@@ -177,6 +407,16 @@ struct drm_exynos_g2d_event {
__u32 reserved;
};
struct drm_exynos_ipp_event {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 ipp_id;
__u32 sequence;
__u64 reserved;
};
#if defined(__cplusplus)
}
#endif


@@ -32,143 +32,615 @@ extern "C" {
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
/**
* struct drm_tegra_gem_create - parameters for the GEM object creation IOCTL
*/
struct drm_tegra_gem_create {
/**
* @size:
*
* The size, in bytes, of the buffer object to be created.
*/
__u64 size;
/**
* @flags:
*
* A bitmask of flags that influence the creation of GEM objects:
*
* DRM_TEGRA_GEM_CREATE_TILED
* Use the 16x16 tiling format for this buffer.
*
* DRM_TEGRA_GEM_CREATE_BOTTOM_UP
* The buffer has a bottom-up layout.
*/
__u32 flags;
/**
* @handle:
*
* The handle of the created GEM object. Set by the kernel upon
* successful completion of the IOCTL.
*/
__u32 handle;
};
/**
* struct drm_tegra_gem_mmap - parameters for the GEM mmap IOCTL
*/
struct drm_tegra_gem_mmap {
/**
* @handle:
*
* Handle of the GEM object to obtain an mmap offset for.
*/
__u32 handle;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
/**
* @offset:
*
* The mmap offset for the given GEM object. Set by the kernel upon
* successful completion of the IOCTL.
*/
__u64 offset;
};
/**
* struct drm_tegra_syncpt_read - parameters for the read syncpoint IOCTL
*/
struct drm_tegra_syncpt_read {
/**
* @id:
*
* ID of the syncpoint to read the current value from.
*/
__u32 id;
/**
* @value:
*
* The current syncpoint value. Set by the kernel upon successful
* completion of the IOCTL.
*/
__u32 value;
};
/**
* struct drm_tegra_syncpt_incr - parameters for the increment syncpoint IOCTL
*/
struct drm_tegra_syncpt_incr {
/**
* @id:
*
* ID of the syncpoint to increment.
*/
__u32 id;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_syncpt_wait - parameters for the wait syncpoint IOCTL
*/
struct drm_tegra_syncpt_wait {
/**
* @id:
*
* ID of the syncpoint to wait on.
*/
__u32 id;
/**
* @thresh:
*
* Threshold value for which to wait.
*/
__u32 thresh;
/**
* @timeout:
*
* Timeout, in milliseconds, to wait.
*/
__u32 timeout;
/**
* @value:
*
* The new syncpoint value after the wait. Set by the kernel upon
* successful completion of the IOCTL.
*/
__u32 value;
};
#define DRM_TEGRA_NO_TIMEOUT (0xffffffff)
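
A small wait sketch (hypothetical helper) using the structure above, with DRM_TEGRA_NO_TIMEOUT for an unbounded wait:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int wait_syncpt(int fd, uint32_t id, uint32_t thresh)
{
	struct drm_tegra_syncpt_wait args;

	memset(&args, 0, sizeof(args));
	args.id = id;
	args.thresh = thresh;
	args.timeout = DRM_TEGRA_NO_TIMEOUT;  /* or a timeout in milliseconds */
	/* on success, args.value holds the syncpoint value after the wait */
	return ioctl(fd, DRM_IOCTL_TEGRA_SYNCPT_WAIT, &args);
}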
/**
* struct drm_tegra_open_channel - parameters for the open channel IOCTL
*/
struct drm_tegra_open_channel {
/**
* @client:
*
* The client ID for this channel.
*/
__u32 client;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
/**
* @context:
*
* The application context of this channel. Set by the kernel upon
* successful completion of the IOCTL. This context needs to be passed
* to the DRM_TEGRA_CHANNEL_CLOSE or the DRM_TEGRA_SUBMIT IOCTLs.
*/
__u64 context;
};
/**
* struct drm_tegra_close_channel - parameters for the close channel IOCTL
*/
struct drm_tegra_close_channel {
/**
* @context:
*
* The application context of this channel. This is obtained from the
* DRM_TEGRA_OPEN_CHANNEL IOCTL.
*/
__u64 context;
};
/**
* struct drm_tegra_get_syncpt - parameters for the get syncpoint IOCTL
*/
struct drm_tegra_get_syncpt {
/**
* @context:
*
* The application context identifying the channel for which to obtain
* the syncpoint ID.
*/
__u64 context;
/**
* @index:
*
* Index of the client syncpoint for which to obtain the ID.
*/
__u32 index;
/**
* @id:
*
* The ID of the given syncpoint. Set by the kernel upon successful
* completion of the IOCTL.
*/
__u32 id;
};
/**
* struct drm_tegra_get_syncpt_base - parameters for the get wait base IOCTL
*/
struct drm_tegra_get_syncpt_base {
/**
* @context:
*
* The application context identifying for which channel to obtain the
* wait base.
*/
__u64 context;
/**
* @syncpt:
*
* ID of the syncpoint for which to obtain the wait base.
*/
__u32 syncpt;
/**
* @id:
*
* The ID of the wait base corresponding to the client syncpoint. Set
* by the kernel upon successful completion of the IOCTL.
*/
__u32 id;
};
/**
* struct drm_tegra_syncpt - syncpoint increment operation
*/
struct drm_tegra_syncpt {
/**
* @id:
*
* ID of the syncpoint to operate on.
*/
__u32 id;
/**
* @incrs:
*
* Number of increments to perform for the syncpoint.
*/
__u32 incrs;
};
/**
* struct drm_tegra_cmdbuf - structure describing a command buffer
*/
struct drm_tegra_cmdbuf {
/**
* @handle:
*
* Handle to a GEM object containing the command buffer.
*/
__u32 handle;
/**
* @offset:
*
* Offset, in bytes, into the GEM object identified by @handle at
* which the command buffer starts.
*/
__u32 offset;
/**
* @words:
*
* Number of 32-bit words in this command buffer.
*/
__u32 words;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_reloc - GEM object relocation structure
*/
struct drm_tegra_reloc {
struct {
/**
* @cmdbuf.handle:
*
* Handle to the GEM object containing the command buffer for
* which to perform this GEM object relocation.
*/
__u32 handle;
/**
* @cmdbuf.offset:
*
* Offset, in bytes, into the command buffer at which to
* insert the relocated address.
*/
__u32 offset;
} cmdbuf;
struct {
/**
* @target.handle:
*
* Handle to the GEM object to be relocated.
*/
__u32 handle;
/**
* @target.offset:
*
* Offset, in bytes, into the target GEM object at which the
* relocated data starts.
*/
__u32 offset;
} target;
/**
* @shift:
*
* The number of bits by which to shift relocated addresses.
*/
__u32 shift;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_waitchk - wait check structure
*/
struct drm_tegra_waitchk {
/**
* @handle:
*
* Handle to the GEM object containing a command stream on which to
* perform the wait check.
*/
__u32 handle;
/**
* @offset:
*
* Offset, in bytes, of the location in the command stream to perform
* the wait check on.
*/
__u32 offset;
/**
* @syncpt:
*
* ID of the syncpoint to wait check.
*/
__u32 syncpt;
/**
* @thresh:
*
* Threshold value for which to check.
*/
__u32 thresh;
};
/**
* struct drm_tegra_submit - job submission structure
*/
struct drm_tegra_submit {
/**
* @context:
*
* The application context identifying the channel to use for the
* execution of this job.
*/
__u64 context;
__u32 num_syncpts;
__u32 num_cmdbufs;
__u32 num_relocs;
__u32 num_waitchks;
__u32 waitchk_mask;
__u32 timeout;
__u64 syncpts;
__u64 cmdbufs;
__u64 relocs;
__u64 waitchks;
__u32 fence; /* Return value */
__u32 reserved[5]; /* future expansion */
/**
* @num_syncpts:
*
* The number of syncpoints operated on by this job. This defines the
* length of the array pointed to by @syncpts.
*/
__u32 num_syncpts;
/**
* @num_cmdbufs:
*
* The number of command buffers to execute as part of this job. This
* defines the length of the array pointed to by @cmdbufs.
*/
__u32 num_cmdbufs;
/**
* @num_relocs:
*
* The number of relocations to perform before executing this job.
* This defines the length of the array pointed to by @relocs.
*/
__u32 num_relocs;
/**
* @num_waitchks:
*
* The number of wait checks to perform as part of this job. This
* defines the length of the array pointed to by @waitchks.
*/
__u32 num_waitchks;
/**
* @waitchk_mask:
*
* Bitmask of valid wait checks.
*/
__u32 waitchk_mask;
/**
* @timeout:
*
* Timeout, in milliseconds, before this job is cancelled.
*/
__u32 timeout;
/**
* @syncpts:
*
* A pointer to an array of &struct drm_tegra_syncpt structures that
* specify the syncpoint operations performed as part of this job.
* The number of elements in the array must be equal to the value
* given by @num_syncpts.
*/
__u64 syncpts;
/**
* @cmdbufs:
*
* A pointer to an array of &struct drm_tegra_cmdbuf structures that
* define the command buffers to execute as part of this job. The
* number of elements in the array must be equal to the value given
* by @num_cmdbufs.
*/
__u64 cmdbufs;
/**
* @relocs:
*
* A pointer to an array of &struct drm_tegra_reloc structures that
* specify the relocations that need to be performed before executing
* this job. The number of elements in the array must be equal to the
* value given by @num_relocs.
*/
__u64 relocs;
/**
* @waitchks:
*
* A pointer to an array of &struct drm_tegra_waitchk structures that
* specify the wait checks to be performed while executing this job.
* The number of elements in the array must be equal to the value
* given by @num_waitchks.
*/
__u64 waitchks;
/**
* @fence:
*
* The threshold of the syncpoint associated with this job after it
* has been completed. Set by the kernel upon successful completion of
* the IOCTL. This can be used with the DRM_TEGRA_SYNCPT_WAIT IOCTL to
* wait for this job to be finished.
*/
__u32 fence;
/**
* @reserved:
*
* This field is reserved for future use. Must be 0.
*/
__u32 reserved[5];
};
#define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
#define DRM_TEGRA_GEM_TILING_MODE_TILED 1
#define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
/**
* struct drm_tegra_gem_set_tiling - parameters for the set tiling IOCTL
*/
struct drm_tegra_gem_set_tiling {
/* input */
/**
* @handle:
*
* Handle to the GEM object for which to set the tiling parameters.
*/
__u32 handle;
/**
* @mode:
*
* The tiling mode to set. Must be one of:
*
* DRM_TEGRA_GEM_TILING_MODE_PITCH
* pitch linear format
*
* DRM_TEGRA_GEM_TILING_MODE_TILED
* 16x16 tiling format
*
* DRM_TEGRA_GEM_TILING_MODE_BLOCK
* 16Bx2 tiling format
*/
__u32 mode;
/**
* @value:
*
* The value to set for the tiling mode parameter.
*/
__u32 value;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_gem_get_tiling - parameters for the get tiling IOCTL
*/
struct drm_tegra_gem_get_tiling {
/* input */
/**
* @handle:
*
* Handle to the GEM object for which to query the tiling parameters.
*/
__u32 handle;
/* output */
/**
* @mode:
*
* The tiling mode currently associated with the GEM object. Set by
* the kernel upon successful completion of the IOCTL.
*/
__u32 mode;
/**
* @value:
*
* The tiling mode parameter currently associated with the GEM object.
* Set by the kernel upon successful completion of the IOCTL.
*/
__u32 value;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
#define DRM_TEGRA_GEM_BOTTOM_UP (1 << 0)
#define DRM_TEGRA_GEM_FLAGS (DRM_TEGRA_GEM_BOTTOM_UP)
/**
* struct drm_tegra_gem_set_flags - parameters for the set flags IOCTL
*/
struct drm_tegra_gem_set_flags {
/* input */
/**
* @handle:
*
* Handle to the GEM object for which to set the flags.
*/
__u32 handle;
/* output */
/**
* @flags:
*
* The flags to set for the GEM object.
*/
__u32 flags;
};
/**
* struct drm_tegra_gem_get_flags - parameters for the get flags IOCTL
*/
struct drm_tegra_gem_get_flags {
/* input */
/**
* @handle:
*
* Handle to the GEM object for which to query the flags.
*/
__u32 handle;
/* output */
/**
* @flags:
*
* The flags currently associated with the GEM object. Set by the
* kernel upon successful completion of the IOCTL.
*/
__u32 flags;
};
@@ -193,7 +665,7 @@ struct drm_tegra_gem_get_flags {
#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_close_channel)
#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)

include/uapi/drm/v3d_drm.h (new file, 194 lines)

@@ -0,0 +1,194 @@
/*
* Copyright © 2014-2018 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _V3D_DRM_H_
#define _V3D_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
#define DRM_V3D_SUBMIT_CL 0x00
#define DRM_V3D_WAIT_BO 0x01
#define DRM_V3D_CREATE_BO 0x02
#define DRM_V3D_MMAP_BO 0x03
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
#define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
* engine.
*
* This asks the kernel to have the GPU execute an optional binner
* command list, and a render command list.
*/
struct drm_v3d_submit_cl {
/* Pointer to the binner command list.
*
* This is the first set of commands executed, which runs the
* coordinate shader to determine where primitives land on the screen,
* then writes out the state updates and draw calls necessary per tile
* to the tile allocation BO.
*/
__u32 bcl_start;
/** End address of the BCL (first byte after the BCL) */
__u32 bcl_end;
/* Offset of the render command list.
*
* This is the second set of commands executed, which will either
* execute the tiles that have been set up by the BCL, or a fixed set
* of tiles (in the case of RCL-only blits).
*/
__u32 rcl_start;
/** End address of the RCL (first byte after the RCL) */
__u32 rcl_end;
/** An optional sync object to wait on before starting the BCL. */
__u32 in_sync_bcl;
/** An optional sync object to wait on before starting the RCL. */
__u32 in_sync_rcl;
/** An optional sync object to place the completion fence in. */
__u32 out_sync;
/* Offset of the tile alloc memory
*
* This is optional on V3D 3.3 (where the CL can set the value) but
* required on V3D 4.1.
*/
__u32 qma;
/** Size of the tile alloc memory. */
__u32 qms;
/** Offset of the tile state data array. */
__u32 qts;
/* Pointer to a u32 array of the BOs that are referenced by the job.
*/
__u64 bo_handles;
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
/* Pad, must be zero-filled. */
__u32 pad;
};
/**
* struct drm_v3d_wait_bo - ioctl argument for waiting for
* completion of the last DRM_V3D_SUBMIT_CL on a BO.
*
* This is useful for cases where multiple processes might be
* rendering to a BO and you want to wait for all rendering to be
* completed.
*/
struct drm_v3d_wait_bo {
__u32 handle;
__u32 pad;
__u64 timeout_ns;
};
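
A hedged sketch of waiting up to one second for rendering on a BO to drain (hypothetical helper):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

static int v3d_wait_bo_1s(int fd, uint32_t handle)
{
	struct drm_v3d_wait_bo args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.timeout_ns = 1000000000ull;  /* 1 second, in nanoseconds */
	return ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &args);
}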
/**
* struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
*
* There are currently no values for the flags argument, but it may be
* used in a future extension.
*/
struct drm_v3d_create_bo {
__u32 size;
__u32 flags;
/** Returned GEM handle for the BO. */
__u32 handle;
/**
* Returned offset for the BO in the V3D address space. This offset
* is private to the DRM fd and is valid for the lifetime of the GEM
* handle.
*
* This offset value will always be nonzero, since various HW
* units treat 0 specially.
*/
__u32 offset;
};
/**
* struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
*
* This doesn't actually perform an mmap. Instead, it returns the
* offset you need to use in an mmap on the DRM device node. This
* means that tools like valgrind end up knowing about the mapped
* memory.
*
* There are currently no values for the flags argument, but it may be
* used in a future extension.
*/
struct drm_v3d_mmap_bo {
/** Handle for the object being mapped. */
__u32 handle;
__u32 flags;
/** offset into the drm node to use for subsequent mmap call. */
__u64 offset;
};
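
Since the ioctl only hands back a fake offset, the actual mapping is a plain mmap() on the DRM fd; a hedged sketch (hypothetical helper; caller checks for MAP_FAILED):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/v3d_drm.h>

static void *v3d_map_bo(int fd, uint32_t handle, size_t size)
{
	struct drm_v3d_mmap_bo args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	if (ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &args))
		return MAP_FAILED;
	/* map the BO through the offset the kernel returned */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, args.offset);
}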
enum drm_v3d_param {
DRM_V3D_PARAM_V3D_UIFCFG,
DRM_V3D_PARAM_V3D_HUB_IDENT1,
DRM_V3D_PARAM_V3D_HUB_IDENT2,
DRM_V3D_PARAM_V3D_HUB_IDENT3,
DRM_V3D_PARAM_V3D_CORE0_IDENT0,
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
};
struct drm_v3d_get_param {
__u32 param;
__u32 pad;
__u64 value;
};
/**
* Returns the offset for the BO in the V3D address space for this DRM fd.
* This is the same value returned by drm_v3d_create_bo, if that was called
* from this DRM fd.
*/
struct drm_v3d_get_bo_offset {
__u32 handle;
__u32 offset;
};
#if defined(__cplusplus)
}
#endif
#endif /* _V3D_DRM_H_ */


@@ -183,10 +183,17 @@ struct drm_vc4_submit_cl {
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmonid;
/* Unused field to align this struct on 64 bits. Must be set to 0.
* If one ever needs to add an u32 field to this struct, this field
* can be used.
*/
/* Syncobj handle to wait on. If set, processing of this render job
* will not start until the syncobj is signaled. 0 means ignore.
*/
__u32 in_sync;
/* Syncobj handle to export fence to. If set, the fence in the syncobj
* will be replaced with a fence that signals upon completion of this
* render job. 0 means ignore.
*/
__u32 out_sync;
__u32 pad2;
};


@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
@@ -68,6 +69,8 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
/*************************************************************************/
/**
@@ -79,6 +82,9 @@ extern "C" {
*
* DRM_VMW_PARAM_OVERLAY_IOCTL:
* Does the driver support the overlay ioctl.
*
* DRM_VMW_PARAM_SM4_1:
* SM4_1 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -94,6 +100,8 @@ extern "C" {
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -356,9 +364,9 @@ struct drm_vmw_fence_rep {
/*************************************************************************/
/**
* DRM_VMW_ALLOC_DMABUF
* DRM_VMW_ALLOC_BO
*
* Allocate a DMA buffer that is visible also to the host.
* Allocate a buffer object that is visible also to the host.
* NOTE: The buffer is
* identified by a handle and an offset, which are private to the guest, but
* usable in the command stream. The guest kernel may translate these
@@ -366,27 +374,28 @@ struct drm_vmw_fence_rep {
* be zero at all times, or it may disappear from the interface before it is
* fixed.
*
* The DMA buffer may stay user-space mapped in the guest at all times,
* The buffer object may stay user-space mapped in the guest at all times,
* and is thus suitable for sub-allocation.
*
* DMA buffers are mapped using the mmap() syscall on the drm device.
* Buffer objects are mapped using the mmap() syscall on the drm device.
*/
/**
* struct drm_vmw_alloc_dmabuf_req
* struct drm_vmw_alloc_bo_req
*
* @size: Required minimum size of the buffer.
*
* Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
* Input data to the DRM_VMW_ALLOC_BO Ioctl.
*/
struct drm_vmw_alloc_dmabuf_req {
struct drm_vmw_alloc_bo_req {
__u32 size;
__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
/**
* struct drm_vmw_dmabuf_rep
* struct drm_vmw_bo_rep
*
* @map_handle: Offset to use in the mmap() call used to map the buffer.
* @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +404,32 @@ struct drm_vmw_alloc_dmabuf_req {
* @cur_gmr_offset: Offset to use in the command stream when this buffer is
* referenced. See note above.
*
* Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
* Output data from the DRM_VMW_ALLOC_BO Ioctl.
*/
struct drm_vmw_dmabuf_rep {
struct drm_vmw_bo_rep {
__u64 map_handle;
__u32 handle;
__u32 cur_gmr_id;
__u32 cur_gmr_offset;
__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
/**
* union drm_vmw_dmabuf_arg
* union drm_vmw_alloc_bo_arg
*
* @req: Input data as described above.
* @rep: Output data as described above.
*
* Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
* Argument to the DRM_VMW_ALLOC_BO Ioctl.
*/
union drm_vmw_alloc_dmabuf_arg {
struct drm_vmw_alloc_dmabuf_req req;
struct drm_vmw_dmabuf_rep rep;
};
/*************************************************************************/
/**
* DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
*
*/
/**
* struct drm_vmw_unref_dmabuf_arg
*
* @handle: Handle indicating what buffer to free. Obtained from the
* DRM_VMW_ALLOC_DMABUF Ioctl.
*
* Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
*/
struct drm_vmw_unref_dmabuf_arg {
__u32 handle;
__u32 pad64;
union drm_vmw_alloc_bo_arg {
struct drm_vmw_alloc_bo_req req;
struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
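
A hedged allocation sketch: @req is filled before the call and @rep is valid after it. The DRM_IOCTL_VMW_* wrapper macros live outside this header, so the request code is composed inline here from DRM_VMW_ALLOC_BO (an assumption about how the driver numbers it):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

#define MY_IOCTL_VMW_ALLOC_BO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_BO, union drm_vmw_alloc_bo_arg)

static int vmw_alloc_bo(int fd, uint32_t size, uint32_t *handle, uint64_t *map)
{
	union drm_vmw_alloc_bo_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = size;
	if (ioctl(fd, MY_IOCTL_VMW_ALLOC_BO, &arg))
		return -1;
	*handle = arg.rep.handle;  /* buffer handle, used for unref/close */
	*map = arg.rep.map_handle; /* offset for a later mmap() on the fd */
	return 0;
}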
/*************************************************************************/
/**
@@ -1103,9 +1094,8 @@ union drm_vmw_extended_context_arg {
* DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
* underlying resource.
*
* Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
* The ioctl arguments therefore need to be identical in layout.
*
* Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
* Ioctl.
*/
/**
@@ -1119,7 +1109,107 @@ struct drm_vmw_handle_close_arg {
__u32 handle;
__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
/*************************************************************************/
/**
* DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
*
* Allocates a surface handle and queues a create surface command
* for the host on the first use of the surface. The surface ID can
* be used as the surface ID in commands referencing the surface.
*
* This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
* parameter and 64-bit svga3d flags.
*/
/**
* enum drm_vmw_surface_version
*
* @drm_vmw_gb_surface_v1: Corresponds to the current gb surface format, with
* the svga3d surface flags split in two: an upper and a lower 32-bit half.
*/
enum drm_vmw_surface_version {
drm_vmw_gb_surface_v1
};
/**
* struct drm_vmw_gb_surface_create_ext_req
*
* @base: Surface create parameters.
* @version: Version of surface create ioctl.
* @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
* @multisample_pattern: Multisampling pattern when msaa is supported.
* @quality_level: Precision settings for each sample.
* @must_be_zero: Reserved for future usage.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
* Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
*/
struct drm_vmw_gb_surface_create_ext_req {
struct drm_vmw_gb_surface_create_req base;
enum drm_vmw_surface_version version;
uint32_t svga3d_flags_upper_32_bits;
SVGA3dMSPattern multisample_pattern;
SVGA3dMSQualityLevel quality_level;
uint64_t must_be_zero;
};
/**
* union drm_vmw_gb_surface_create_ext_arg
*
* @req: Input argument as described above.
* @rep: Output argument as described above.
*
* Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
*/
union drm_vmw_gb_surface_create_ext_arg {
struct drm_vmw_gb_surface_create_rep rep;
struct drm_vmw_gb_surface_create_ext_req req;
};
/*************************************************************************/
/**
* DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
*
* Puts a reference on a host surface with a given handle, as previously
* returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
* A reference will make sure the surface isn't destroyed while we hold
* it and will allow the calling client to use the surface handle in
* the command stream.
*
* On successful return, the Ioctl returns the surface information given
* to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
*/
/**
* struct drm_vmw_gb_surface_ref_ext_rep
*
* @creq: The data used as input when the surface was created, as described
* above at "struct drm_vmw_gb_surface_create_ext_req"
* @crep: Additional data output when the surface was created, as described
* above at "struct drm_vmw_gb_surface_create_rep"
*
* Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
*/
struct drm_vmw_gb_surface_ref_ext_rep {
struct drm_vmw_gb_surface_create_ext_req creq;
struct drm_vmw_gb_surface_create_rep crep;
};
/**
* union drm_vmw_gb_surface_reference_ext_arg
*
* @req: Input data as described above at "struct drm_vmw_surface_arg"
* @rep: Output data as described above at
* "struct drm_vmw_gb_surface_ref_ext_rep"
*
* Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
*/
union drm_vmw_gb_surface_reference_ext_arg {
struct drm_vmw_gb_surface_ref_ext_rep rep;
struct drm_vmw_surface_arg req;
};
#if defined(__cplusplus)
}