Merge branch 'next' into for-linus

Prepare input updates for 4.10 merge window.
Author: Dmitry Torokhov
Date: 2016-12-16 09:31:17 -08:00
17206 changed files with 1178860 additions and 539945 deletions


@@ -61,12 +61,12 @@ bool acpi_ata_match(acpi_handle handle);
bool acpi_bay_match(acpi_handle handle);
bool acpi_dock_match(acpi_handle handle);
bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs);
bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
int rev, int func, union acpi_object *argv4);
u64 rev, u64 func, union acpi_object *argv4);
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
union acpi_object *argv4, acpi_object_type type)
{
union acpi_object *obj;
@@ -87,7 +87,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
.package.elements = (eles) \
}
bool acpi_dev_present(const char *hid);
bool acpi_dev_found(const char *hid);
#ifdef CONFIG_ACPI
@@ -420,6 +420,13 @@ static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwn
container_of(fwnode, struct acpi_data_node, fwnode) : NULL;
}
static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
const char *name)
{
return is_acpi_data_node(fwnode) ?
(!strcmp(to_acpi_data_node(fwnode)->name, name)) : false;
}
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
return &adev->fwnode;
}
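Note: the _DSM helpers above now take 64-bit revision and function-mask arguments. A minimal caller sketch, assuming a placeholder UUID (not a real device UUID):

static const u8 example_dsm_uuid[16] = { 0 };	/* placeholder, not a real UUID */

static bool example_has_dsm_func1(acpi_handle handle)
{
	/* revision 2; bit 1 of the mask asks whether function index 1 exists */
	return acpi_check_dsm(handle, example_dsm_uuid, 2, 1ULL << 1);
}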


@@ -13,7 +13,7 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
}
#endif
void __iomem *__init_refok
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);


@@ -15,6 +15,10 @@ extern int pxm_to_node(int);
extern int node_to_pxm(int);
extern int acpi_map_pxm_to_node(int);
extern unsigned char acpi_srat_revision;
extern int acpi_numa __initdata;
extern void bad_srat(void);
extern int srat_disabled(void);
#endif /* CONFIG_ACPI_NUMA */
#endif /* __ACP_NUMA_H */


@@ -96,7 +96,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
char **new_val);
acpi_string *new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
@@ -108,7 +108,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
acpi_physical_address * new_address,
acpi_physical_address *new_address,
u32 *new_table_length);
#endif
@@ -203,7 +203,7 @@ void acpi_os_unmap_memory(void *logical_address, acpi_size size);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address
acpi_status
acpi_os_get_physical_address(void *logical_address,
acpi_physical_address * physical_address);
acpi_physical_address *physical_address);
#endif
/*
@@ -379,14 +379,14 @@ acpi_status
acpi_os_get_table_by_name(char *signature,
u32 instance,
struct acpi_table_header **table,
acpi_physical_address * address);
acpi_physical_address *address);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
acpi_status
acpi_os_get_table_by_index(u32 index,
struct acpi_table_header **table,
u32 *instance, acpi_physical_address * address);
u32 *instance, acpi_physical_address *address);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address


@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20160108
#define ACPI_CA_VERSION 0x20160422
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -484,8 +484,8 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
acpi_find_root_pointer(acpi_physical_address *
rsdp_address))
acpi_find_root_pointer(acpi_physical_address
*rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table_header(acpi_string signature,
u32 instance,
@@ -530,7 +530,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_handle(acpi_handle parent,
acpi_string pathname,
acpi_handle * ret_handle))
acpi_handle *ret_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_attach_data(acpi_handle object,
acpi_object_handler handler,
@@ -575,15 +575,15 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
acpi_handle child,
acpi_handle * out_handle))
acpi_handle *out_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_type(acpi_handle object,
acpi_object_type * out_type))
acpi_object_type *out_type))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_parent(acpi_handle object,
acpi_handle * out_handle))
acpi_handle *out_handle))
/*
* Handler interfaces
@@ -755,7 +755,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
acpi_handle * gpe_device))
acpi_handle *gpe_device))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
@@ -771,8 +771,8 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
* Resource interfaces
*/
typedef
acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
void *context);
acpi_status (*acpi_walk_resource_callback) (struct acpi_resource * resource,
void *context);
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_vendor_resource(acpi_handle device,
@@ -938,7 +938,8 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(void
ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
void ACPI_INTERNAL_VAR_XFACE
acpi_log_error(const char *format, ...))
acpi_status acpi_initialize_debugger(void);
acpi_status acpi_initialize_debugger(void);
void acpi_terminate_debugger(void);
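The callback typedefs in this hunk only changed formatting. For reference, a sketch of a callback matching acpi_walk_resource_callback, with error handling trimmed:

static acpi_status example_count_res(struct acpi_resource *res, void *context)
{
	unsigned int *count = context;

	(*count)++;
	return AE_OK;
}

/* usage: acpi_walk_resources(handle, METHOD_NAME__CRS, example_count_res, &count); */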


@@ -417,6 +417,7 @@ struct acpi_resource_gpio {
u8 type; \
u8 producer_consumer; /* For values, see Producer/Consumer above */\
u8 slave_mode; \
u8 connection_sharing; \
u8 type_revision_id; \
u16 type_data_length; \
u16 vendor_length; \


@@ -223,7 +223,7 @@ struct acpi_table_facs {
/*******************************************************************************
*
* FADT - Fixed ACPI Description Table (Signature "FACP")
* Version 4
* Version 6
*
******************************************************************************/
@@ -413,4 +413,6 @@ struct acpi_table_desc {
#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
#endif /* __ACTBL_H__ */


@@ -236,7 +236,8 @@ enum acpi_einj_actions {
ACPI_EINJ_CHECK_BUSY_STATUS = 6,
ACPI_EINJ_GET_COMMAND_STATUS = 7,
ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
ACPI_EINJ_ACTION_RESERVED = 9, /* 9 and greater are reserved */
ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */
ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
};
@@ -348,7 +349,8 @@ enum acpi_erst_actions {
ACPI_ERST_GET_ERROR_RANGE = 13,
ACPI_ERST_GET_ERROR_LENGTH = 14,
ACPI_ERST_GET_ERROR_ATTRIBUTES = 15,
ACPI_ERST_ACTION_RESERVED = 16 /* 16 and greater are reserved */
ACPI_ERST_EXECUTE_TIMINGS = 16,
ACPI_ERST_ACTION_RESERVED = 17 /* 17 and greater are reserved */
};
/* Values for Instruction field above */
@@ -427,7 +429,8 @@ enum acpi_hest_types {
ACPI_HEST_TYPE_AER_ENDPOINT = 7,
ACPI_HEST_TYPE_AER_BRIDGE = 8,
ACPI_HEST_TYPE_GENERIC_ERROR = 9,
ACPI_HEST_TYPE_RESERVED = 10 /* 10 and greater are reserved */
ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10,
ACPI_HEST_TYPE_RESERVED = 11 /* 11 and greater are reserved */
};
/*
@@ -506,7 +509,11 @@ enum acpi_hest_notify_types {
ACPI_HEST_NOTIFY_NMI = 4,
ACPI_HEST_NOTIFY_CMCI = 5, /* ACPI 5.0 */
ACPI_HEST_NOTIFY_MCE = 6, /* ACPI 5.0 */
ACPI_HEST_NOTIFY_RESERVED = 7 /* 7 and greater are reserved */
ACPI_HEST_NOTIFY_GPIO = 7, /* ACPI 6.0 */
ACPI_HEST_NOTIFY_SEA = 8, /* ACPI 6.1 */
ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */
ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */
ACPI_HEST_NOTIFY_RESERVED = 11 /* 11 and greater are reserved */
};
/* Values for config_write_enable bitfield above */
@@ -603,6 +610,24 @@ struct acpi_hest_generic {
u32 error_block_length;
};
/* 10: Generic Hardware Error Source, version 2 */
struct acpi_hest_generic_v2 {
struct acpi_hest_header header;
u16 related_source_id;
u8 reserved;
u8 enabled;
u32 records_to_preallocate;
u32 max_sections_per_record;
u32 max_raw_data_length;
struct acpi_generic_address error_status_address;
struct acpi_hest_notify notify;
u32 error_block_length;
struct acpi_generic_address read_ack_register;
u64 read_ack_preserve;
u64 read_ack_write;
};
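A sketch of the GHESv2 read-ack handshake the new fields describe, using the ACPICA GAS accessors acpi_read()/acpi_write(); status checking omitted:

u64 val;

acpi_read(&val, &gv2->read_ack_register);
val &= gv2->read_ack_preserve;	/* keep the bits firmware owns */
val |= gv2->read_ack_write;	/* set the acknowledge bits */
acpi_write(val, &gv2->read_ack_register);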
/* Generic Error Status block */
struct acpi_hest_generic_status {
@@ -634,6 +659,33 @@ struct acpi_hest_generic_data {
u8 fru_text[20];
};
/* Extension for revision 0x0300 */
struct acpi_hest_generic_data_v300 {
u8 section_type[16];
u32 error_severity;
u16 revision;
u8 validation_bits;
u8 flags;
u32 error_data_length;
u8 fru_id[16];
u8 fru_text[20];
u64 time_stamp;
};
/* Values for error_severity above */
#define ACPI_HEST_GEN_ERROR_RECOVERABLE 0
#define ACPI_HEST_GEN_ERROR_FATAL 1
#define ACPI_HEST_GEN_ERROR_CORRECTED 2
#define ACPI_HEST_GEN_ERROR_NONE 3
/* Flags for validation_bits above */
#define ACPI_HEST_GEN_VALID_FRU_ID (1)
#define ACPI_HEST_GEN_VALID_FRU_STRING (1<<1)
#define ACPI_HEST_GEN_VALID_TIMESTAMP (1<<2)
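Sketch of consuming the new v300 timestamp, guarded by the revision and the validation bit as the structure requires ("gdata" is an assumed pointer to a v300 entry):

static u64 example_entry_timestamp(struct acpi_hest_generic_data_v300 *gdata)
{
	if (gdata->revision >= 0x300 &&
	    (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP))
		return gdata->time_stamp;
	return 0;	/* no valid timestamp recorded */
}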
/*******************************************************************************
*
* MADT - Multiple APIC Description Table
@@ -934,7 +986,7 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
* NFIT - NVDIMM Interface Table (ACPI 6.0)
* NFIT - NVDIMM Interface Table (ACPI 6.0+)
* Version 1
*
******************************************************************************/
@@ -1015,6 +1067,7 @@ struct acpi_nfit_memory_map {
#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */
#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */
/* 2: Interleave Structure */
@@ -1046,7 +1099,10 @@ struct acpi_nfit_control_region {
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 subsystem_revision_id;
u8 reserved[6]; /* Reserved, must be zero */
u8 valid_fields;
u8 manufacturing_location;
u16 manufacturing_date;
u8 reserved[2]; /* Reserved, must be zero */
u32 serial_number;
u16 code;
u16 windows;
@@ -1061,7 +1117,11 @@ struct acpi_nfit_control_region {
/* Flags */
#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
/* valid_fields bits */
#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */
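Sketch: the manufacturing fields carved out of the old reserved bytes are only meaningful when the new valid_fields bit is set ("dcr" is an assumed pointer to the control region):

if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
	pr_debug("NVDIMM mfg location %#x, date %#x\n",
		 dcr->manufacturing_location, dcr->manufacturing_date);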
/* 5: NVDIMM Block Data Window Region Structure */


@@ -321,7 +321,7 @@ struct acpi_csrt_descriptor {
* DBG2 - Debug Port Table 2
* Version 0 (Both main table and subtables)
*
* Conforms to "Microsoft Debug Port Table 2 (DBG2)", May 22 2012.
* Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
*
******************************************************************************/
@@ -371,6 +371,11 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_COMPATIBLE 0x0000
#define ACPI_DBG2_16550_SUBSET 0x0001
#define ACPI_DBG2_ARM_PL011 0x0003
#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
#define ACPI_DBG2_ARM_DCC 0x000F
#define ACPI_DBG2_BCM2835 0x0010
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -399,7 +404,7 @@ struct acpi_table_dbgp {
* Version 1
*
* Conforms to "Intel Virtualization Technology for Directed I/O",
* Version 2.2, Sept. 2013
* Version 2.3, October 2014
*
******************************************************************************/
@@ -413,6 +418,8 @@ struct acpi_table_dmar {
/* Masks for Flags field above */
#define ACPI_DMAR_INTR_REMAP (1)
#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1)
#define ACPI_DMAR_X2APIC_MODE (1<<2)
/* DMAR subtable header */
@@ -655,7 +662,7 @@ struct acpi_ibft_target {
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
* Document number: ARM DEN 0049A, 2015
* Document number: ARM DEN 0049B, October 2015
*
******************************************************************************/
@@ -685,7 +692,8 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_ITS_GROUP = 0x00,
ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
ACPI_IORT_NODE_SMMU = 0x03
ACPI_IORT_NODE_SMMU = 0x03,
ACPI_IORT_NODE_SMMU_V3 = 0x04
};
struct acpi_iort_id_mapping {
@@ -775,6 +783,23 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
struct acpi_iort_smmu_v3 {
u64 base_address; /* SMMUv3 base address */
u32 flags;
u32 reserved;
u64 vatos_address;
u32 model; /* 0: generic SMMUv3 */
u32 event_gsiv;
u32 pri_gsiv;
u32 gerr_gsiv;
u32 sync_gsiv;
};
/* Masks for Flags field above */
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1)
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -1102,10 +1127,10 @@ struct acpi_table_slic {
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
* Version 1
* Version 2
*
* Conforms to "Serial Port Console Redirection Table",
* Version 1.00, January 11, 2002
* Version 1.03, August 10, 2015
*
******************************************************************************/
@@ -1137,6 +1162,8 @@ struct acpi_table_spcr {
#define ACPI_SPCR_DO_NOT_DISABLE (1)
/* Values for Interface Type: See the definition of the DBG2 table */
/*******************************************************************************
*
* SPMI - Server Platform Management Interface table


@@ -184,7 +184,7 @@ struct acpi_table_fpdt {
struct acpi_table_header header; /* Common ACPI table header */
};
/* FPDT subtable header */
/* FPDT subtable header (Performance Record Structure) */
struct acpi_fpdt_header {
u16 type;
@@ -205,6 +205,57 @@ enum acpi_fpdt_type {
/* 0: Firmware Basic Boot Performance Record */
struct acpi_fpdt_boot_pointer {
struct acpi_fpdt_header header;
u8 reserved[4];
u64 address;
};
/* 1: S3 Performance Table Pointer Record */
struct acpi_fpdt_s3pt_pointer {
struct acpi_fpdt_header header;
u8 reserved[4];
u64 address;
};
/*
* S3PT - S3 Performance Table. This table is pointed to by the
* S3 Pointer Record above.
*/
struct acpi_table_s3pt {
u8 signature[4]; /* "S3PT" */
u32 length;
};
/*
* S3PT Subtables (Not part of the actual FPDT)
*/
/* Values for Type field in S3PT header */
enum acpi_s3pt_type {
ACPI_S3PT_TYPE_RESUME = 0,
ACPI_S3PT_TYPE_SUSPEND = 1,
ACPI_FPDT_BOOT_PERFORMANCE = 2
};
struct acpi_s3pt_resume {
struct acpi_fpdt_header header;
u32 resume_count;
u64 full_resume;
u64 average_resume;
};
struct acpi_s3pt_suspend {
struct acpi_fpdt_header header;
u64 suspend_start;
u64 suspend_end;
};
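Sketch of walking S3PT subtables with the common FPDT record header now shared by both tables; "s3pt" is assumed to point at a mapped table, and each record's header length advances the cursor:

struct acpi_fpdt_header *rec;
void *end = (void *)s3pt + s3pt->length;

for (rec = (void *)(s3pt + 1); (void *)rec < end;
     rec = (void *)rec + rec->length) {
	if (rec->type == ACPI_S3PT_TYPE_RESUME) {
		struct acpi_s3pt_resume *res = (void *)rec;
		/* res->resume_count, res->average_resume, ... */
	}
}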
/*
* FPDT Boot Performance Record (Not part of the actual FPDT)
*/
struct acpi_fpdt_boot {
struct acpi_fpdt_header header;
u8 reserved[4];
@@ -215,52 +266,6 @@ struct acpi_fpdt_boot {
u64 exit_services_exit;
};
/* 1: S3 Performance Table Pointer Record */
struct acpi_fpdt_s3pt_ptr {
struct acpi_fpdt_header header;
u8 reserved[4];
u64 address;
};
/*
* S3PT - S3 Performance Table. This table is pointed to by the
* FPDT S3 Pointer Record above.
*/
struct acpi_table_s3pt {
u8 signature[4]; /* "S3PT" */
u32 length;
};
/*
* S3PT Subtables
*/
struct acpi_s3pt_header {
u16 type;
u8 length;
u8 revision;
};
/* Values for Type field above */
enum acpi_s3pt_type {
ACPI_S3PT_TYPE_RESUME = 0,
ACPI_S3PT_TYPE_SUSPEND = 1
};
struct acpi_s3pt_resume {
struct acpi_s3pt_header header;
u32 resume_count;
u64 full_resume;
u64 average_resume;
};
struct acpi_s3pt_suspend {
struct acpi_s3pt_header header;
u64 suspend_start;
u64 suspend_end;
};
/*******************************************************************************
*
* GTDT - Generic Timer Description Table (ACPI 5.1)
@@ -476,7 +481,8 @@ struct acpi_table_pcct {
enum acpi_pcct_type {
ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
ACPI_PCCT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
ACPI_PCCT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
};
/*
@@ -515,6 +521,26 @@ struct acpi_pcct_hw_reduced {
u16 min_turnaround_time;
};
/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */
struct acpi_pcct_hw_reduced_type2 {
struct acpi_subtable_header header;
u32 doorbell_interrupt;
u8 flags;
u8 reserved;
u64 base_address;
u64 length;
struct acpi_generic_address doorbell_register;
u64 preserve_mask;
u64 write_mask;
u32 latency;
u32 max_access_rate;
u16 min_turnaround_time;
struct acpi_generic_address doorbell_ack_register;
u64 ack_preserve_mask;
u64 ack_write_mask;
};
/* Values for doorbell flags above */
#define ACPI_PCCT_INTERRUPT_POLARITY (1)
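Sketch of the type-2 doorbell-ack sequence implied by the new fields: a read-modify-write of the ack register with the preserve/write masks (acpi_read()/acpi_write() are the ACPICA GAS accessors):

u64 val;

acpi_read(&val, &pcct2->doorbell_ack_register);
val &= pcct2->ack_preserve_mask;
val |= pcct2->ack_write_mask;
acpi_write(val, &pcct2->doorbell_ack_register);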


@@ -630,7 +630,8 @@ typedef u64 acpi_integer;
#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C
#define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D
#define ACPI_NOTIFY_MAX 0x0D
#define ACPI_GENERIC_NOTIFY_MAX 0x0D
#define ACPI_SPECIFIC_NOTIFY_MAX 0x84
/*
* Types associated with ACPI names and objects. The first group of
@@ -892,7 +893,7 @@ typedef u8 acpi_adr_space_type;
/* Sleep function dispatch */
typedef acpi_status(*acpi_sleep_function) (u8 sleep_state);
typedef acpi_status (*acpi_sleep_function) (u8 sleep_state);
struct acpi_sleep_functions {
acpi_sleep_function legacy_function;
@@ -994,7 +995,7 @@ struct acpi_buffer {
* Predefined Namespace items
*/
struct acpi_predefined_names {
char *name;
const char *name;
u8 type;
char *val;
};
@@ -1071,20 +1072,21 @@ void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
typedef
void (*acpi_object_handler) (acpi_handle object, void *data);
typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function);
typedef
acpi_status (*acpi_init_handler) (acpi_handle object, u32 function);
#define ACPI_INIT_DEVICE_INI 1
typedef
acpi_status(*acpi_exception_handler) (acpi_status aml_status,
acpi_name name,
u16 opcode,
u32 aml_offset, void *context);
acpi_status (*acpi_exception_handler) (acpi_status aml_status,
acpi_name name,
u16 opcode,
u32 aml_offset, void *context);
/* Table Event handler (Load, load_table, etc.) and types */
typedef
acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
acpi_status (*acpi_table_handler) (u32 event, void *table, void *context);
#define ACPI_TABLE_LOAD 0x0
#define ACPI_TABLE_UNLOAD 0x1
@@ -1093,12 +1095,12 @@ acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
/* Address Spaces (For Operation Regions) */
typedef
acpi_status(*acpi_adr_space_handler) (u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context,
void *region_context);
acpi_status (*acpi_adr_space_handler) (u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context,
void *region_context);
#define ACPI_DEFAULT_HANDLER NULL
@@ -1111,18 +1113,18 @@ struct acpi_connection_info {
};
typedef
acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
u32 function,
void *handler_context,
void **region_context);
acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
u32 function,
void *handler_context,
void **region_context);
#define ACPI_REGION_ACTIVATE 0
#define ACPI_REGION_DEACTIVATE 1
typedef
acpi_status(*acpi_walk_callback) (acpi_handle object,
u32 nesting_level,
void *context, void **return_value);
acpi_status (*acpi_walk_callback) (acpi_handle object,
u32 nesting_level,
void *context, void **return_value);
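For reference, a minimal callback matching the reformatted acpi_walk_callback typedef, as would be passed to acpi_walk_namespace():

static acpi_status example_walk_cb(acpi_handle object, u32 nesting_level,
				   void *context, void **return_value)
{
	return AE_OK;	/* AE_CTRL_TERMINATE would stop the walk */
}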
typedef
u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
@@ -1227,7 +1229,7 @@ struct acpi_mem_space_context {
* struct acpi_memory_list is used only if the ACPICA local cache is enabled
*/
struct acpi_memory_list {
char *list_name;
const char *list_name;
void *list_head;
u16 object_size;
u16 max_depth;


@@ -15,10 +15,9 @@
#define _CPPC_ACPI_H
#include <linux/acpi.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/types.h>
#include <acpi/pcc.h>
#include <acpi/processor.h>
/* Only support CPPCv2 for now. */
@@ -130,8 +129,4 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern int acpi_get_psd_map(struct cpudata **);
/* Methods to interact with the PCC mailbox controller. */
extern struct mbox_chan *
pcc_mbox_request_channel(struct mbox_client *, unsigned int);
#endif /* _CPPC_ACPI_H*/

include/acpi/pcc.h (new file, +29 lines)

@@ -0,0 +1,29 @@
/*
* PCC (Platform Communications Channel) methods
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#ifndef _PCC_H
#define _PCC_H
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#ifdef CONFIG_PCC
extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
int subspace_id);
extern void pcc_mbox_free_channel(struct mbox_chan *chan);
#else
static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
int subspace_id)
{
return NULL;
}
static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
#endif
#endif /* _PCC_H */
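Sketch of typical use of the relocated helpers; subspace id 0 and a pre-populated "example_client" mbox_client are assumptions:

struct mbox_chan *chan;

chan = pcc_mbox_request_channel(&example_client, 0);
if (!IS_ERR_OR_NULL(chan)) {
	/* ... ring the doorbell / use the subspace's shared memory ... */
	pcc_mbox_free_channel(chan);
}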


@@ -66,17 +66,28 @@
*
*****************************************************************************/
/* Common application configuration. All single threaded except for acpi_exec. */
#if (defined ACPI_ASL_COMPILER) || \
(defined ACPI_BIN_APP) || \
(defined ACPI_DUMP_APP) || \
(defined ACPI_HELP_APP) || \
(defined ACPI_NAMES_APP) || \
(defined ACPI_SRC_APP) || \
(defined ACPI_XTRACT_APP) || \
(defined ACPI_EXAMPLE_APP)
#define ACPI_APPLICATION
#define ACPI_SINGLE_THREADED
#endif
/* iASL configuration */
#ifdef ACPI_ASL_COMPILER
#define ACPI_APPLICATION
#define ACPI_DEBUG_OUTPUT
#define ACPI_CONSTANT_EVAL_ONLY
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
#define ACPI_32BIT_PHYSICAL_ADDRESS
#define ACPI_DISASSEMBLER 1
#endif
@@ -89,21 +100,6 @@
#define ACPI_DBG_TRACK_ALLOCATIONS
#endif
/*
* acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
* configuration. All single threaded.
*/
#if (defined ACPI_BIN_APP) || \
(defined ACPI_DUMP_APP) || \
(defined ACPI_HELP_APP) || \
(defined ACPI_NAMES_APP) || \
(defined ACPI_SRC_APP) || \
(defined ACPI_XTRACT_APP) || \
(defined ACPI_EXAMPLE_APP)
#define ACPI_APPLICATION
#define ACPI_SINGLE_THREADED
#endif
/* acpi_help configuration. Error messages disabled. */
#ifdef ACPI_HELP_APP
@@ -138,11 +134,16 @@
#define ACPI_REDUCED_HARDWARE 1
#endif
/* Linkable ACPICA library */
/* Linkable ACPICA library. Two versions, one with full debug. */
#ifdef ACPI_LIBRARY
#define ACPI_USE_LOCAL_CACHE
#define ACPI_FULL_DEBUG
#define ACPI_DEBUGGER 1
#define ACPI_DISASSEMBLER 1
#ifdef _DEBUG
#define ACPI_DEBUG_OUTPUT
#endif
#endif
/* Common for all ACPICA applications */
@@ -218,6 +219,9 @@
#elif defined(__HAIKU__)
#include "achaiku.h"
#elif defined(__QNX__)
#include "acqnx.h"
#else
/* Unknown environment */


@@ -73,6 +73,10 @@
#define ACPI_DEBUGGER
#endif
#ifdef CONFIG_ACPI_DEBUG
#define ACPI_MUTEX_DEBUG
#endif
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>


@@ -1,54 +0,0 @@
/******************************************************************************
*
* Name: acmsvcex.h - Extra VC specific defines, etc.
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#ifndef __ACMSVCEX_H__
#define __ACMSVCEX_H__
/* Debug support. */
#ifdef _DEBUG
#define _CRTDBG_MAP_ALLOC /* Enables specific file/lineno for leaks */
#include <crtdbg.h>
#endif
#endif /* __ACMSVCEX_H__ */


@@ -1,49 +0,0 @@
/******************************************************************************
*
* Name: acwinex.h - Extra OS specific defines, etc.
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#ifndef __ACWINEX_H__
#define __ACWINEX_H__
/* Windows uses VC */
#endif /* __ACWINEX_H__ */


@@ -39,6 +39,7 @@
#define ACPI_CSTATE_SYSTEMIO 0
#define ACPI_CSTATE_FFH 1
#define ACPI_CSTATE_HALT 2
#define ACPI_CSTATE_INTEGER 3
#define ACPI_CX_DESC_LEN 32
@@ -67,9 +68,25 @@ struct acpi_processor_cx {
char desc[ACPI_CX_DESC_LEN];
};
struct acpi_lpi_state {
u32 min_residency;
u32 wake_latency; /* worst case */
u32 flags;
u32 arch_flags;
u32 res_cnt_freq;
u32 enable_parent_state;
u64 address;
u8 index;
u8 entry_method;
char desc[ACPI_CX_DESC_LEN];
};
struct acpi_processor_power {
int count;
struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
union {
struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
struct acpi_lpi_state lpi_states[ACPI_PROCESSOR_MAX_POWER];
};
int timer_broadcast_on_state;
};
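Because C-state and LPI data now share storage, callers are expected to pick a view via the new has_lpi flag; a sketch:

if (pr->flags.has_lpi)
	desc = pr->power.lpi_states[i].desc;
else
	desc = pr->power.states[i].desc;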
@@ -189,6 +206,7 @@ struct acpi_processor_flags {
u8 bm_control:1;
u8 bm_check:1;
u8 has_cst:1;
u8 has_lpi:1;
u8 power_setup_done:1;
u8 bm_rld_set:1;
u8 need_hotplug_init:1;
@@ -242,7 +260,7 @@ extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
DECLARE_PER_CPU(struct acpi_processor *, processors);
extern struct acpi_processor_errata errata;
#ifdef ARCH_HAS_POWER_INIT
#if defined(ARCH_HAS_POWER_INIT) && defined(CONFIG_ACPI_PROCESSOR_CSTATE)
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
unsigned int cpu);
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
@@ -309,6 +327,7 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
/* in processor_core.c */
phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
phys_cpuid_t acpi_map_madt_entry(u32 acpi_id);
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
@@ -371,7 +390,7 @@ extern struct cpuidle_driver acpi_idle_driver;
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
int acpi_processor_power_init(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
#else
static inline int acpi_processor_power_init(struct acpi_processor *pr)
@@ -384,7 +403,7 @@ static inline int acpi_processor_power_exit(struct acpi_processor *pr)
return -ENODEV;
}
static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr)
static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
return -ENODEV;
}


@@ -4,6 +4,19 @@
#include <linux/errno.h> /* for ENODEV */
#include <linux/types.h> /* for bool */
struct acpi_video_brightness_flags {
u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
u8 _BCL_reversed:1; /* _BCL package is in a reversed order */
u8 _BQC_use_index:1; /* _BQC returns an index value */
};
struct acpi_video_device_brightness {
int curr;
int count;
int *levels;
struct acpi_video_brightness_flags flags;
};
struct acpi_device;
#define ACPI_VIDEO_CLASS "video"
@@ -37,8 +50,11 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
* may change over time and should not be cached.
*/
extern bool acpi_video_handles_brightness_key_presses(void);
extern int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level);
#else
static inline int acpi_video_register(void) { return 0; }
static inline int acpi_video_register(void) { return -ENODEV; }
static inline void acpi_video_unregister(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
@@ -56,6 +72,12 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
{
return false;
}
static inline int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level)
{
return -ENODEV;
}
#endif
#endif
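Sketch of querying the exported brightness levels ("adev" is an assumed bound acpi_device; ownership of dev_br follows the video driver's conventions):

struct acpi_video_device_brightness *br;
int max_level;

if (!acpi_video_get_levels(adev, &br, &max_level))
	pr_info("%d brightness levels, max %d\n", br->count, max_level);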


@@ -112,6 +112,62 @@ static __always_inline void atomic_long_dec(atomic_long_t *l)
ATOMIC_LONG_PFX(_dec)(v);
}
#define ATOMIC_LONG_FETCH_OP(op, mo) \
static inline long \
atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \
}
ATOMIC_LONG_FETCH_OP(add, )
ATOMIC_LONG_FETCH_OP(add, _relaxed)
ATOMIC_LONG_FETCH_OP(add, _acquire)
ATOMIC_LONG_FETCH_OP(add, _release)
ATOMIC_LONG_FETCH_OP(sub, )
ATOMIC_LONG_FETCH_OP(sub, _relaxed)
ATOMIC_LONG_FETCH_OP(sub, _acquire)
ATOMIC_LONG_FETCH_OP(sub, _release)
ATOMIC_LONG_FETCH_OP(and, )
ATOMIC_LONG_FETCH_OP(and, _relaxed)
ATOMIC_LONG_FETCH_OP(and, _acquire)
ATOMIC_LONG_FETCH_OP(and, _release)
ATOMIC_LONG_FETCH_OP(andnot, )
ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
ATOMIC_LONG_FETCH_OP(andnot, _acquire)
ATOMIC_LONG_FETCH_OP(andnot, _release)
ATOMIC_LONG_FETCH_OP(or, )
ATOMIC_LONG_FETCH_OP(or, _relaxed)
ATOMIC_LONG_FETCH_OP(or, _acquire)
ATOMIC_LONG_FETCH_OP(or, _release)
ATOMIC_LONG_FETCH_OP(xor, )
ATOMIC_LONG_FETCH_OP(xor, _relaxed)
ATOMIC_LONG_FETCH_OP(xor, _acquire)
ATOMIC_LONG_FETCH_OP(xor, _release)
#undef ATOMIC_LONG_FETCH_OP
#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \
static inline long \
atomic_long_fetch_##op##mo(atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \
}
ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
#undef ATOMIC_LONG_FETCH_INC_DEC_OP
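The new fetch variants return the value observed before the operation, e.g.:

atomic_long_t counter = ATOMIC_LONG_INIT(1);
long old = atomic_long_fetch_add(2, &counter);	/* old == 1, counter == 3 */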
#define ATOMIC_LONG_OP(op) \
static __always_inline void \
atomic_long_##op(long i, atomic_long_t *l) \
@@ -124,9 +180,9 @@ atomic_long_##op(long i, atomic_long_t *l) \
ATOMIC_LONG_OP(add)
ATOMIC_LONG_OP(sub)
ATOMIC_LONG_OP(and)
ATOMIC_LONG_OP(andnot)
ATOMIC_LONG_OP(or)
ATOMIC_LONG_OP(xor)
ATOMIC_LONG_OP(andnot)
#undef ATOMIC_LONG_OP


@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
}
#else
#include <linux/irqflags.h>
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return ret; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
\
raw_local_irq_save(flags); \
ret = v->counter; \
v->counter = v->counter c_op i; \
raw_local_irq_restore(flags); \
\
return ret; \
}
#endif /* CONFIG_SMP */
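Both fallback implementations above share the same semantics: the pre-update value is returned. A quick example:

atomic_t flags = ATOMIC_INIT(0x1);
int old = atomic_fetch_or(0x4, &flags);	/* old == 0x1, flags is now 0x5 */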
#ifndef atomic_add_return
@@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)
#endif
#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#endif
#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#endif
#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#endif
#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#endif
#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
#endif
#ifndef atomic_and
ATOMIC_OP(and, &)
#endif
@@ -110,6 +156,7 @@ ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)
#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP


@@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
extern long long atomic64_##op##_return(long long a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
#define ATOMIC64_FETCH_OP(op) \
extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP


@@ -194,7 +194,7 @@ do { \
})
#endif
#endif
#endif /* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
@@ -207,5 +207,44 @@ do { \
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
/**
* smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
*
* A control dependency provides a LOAD->STORE order, the additional RMB
* provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
* aka. (load)-ACQUIRE.
*
* Architectures that do not do load speculation can have this be barrier().
*/
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep() smp_rmb()
#endif
/**
* smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
* @ptr: pointer to the variable to wait on
* @cond: boolean expression to wait for
*
* Equivalent to using smp_load_acquire() on the condition variable but employs
* the control dependency of the wait to reduce the barrier on many platforms.
*
* Due to C lacking lambda expressions we load the value of *ptr into a
* pre-named variable @VAL to be used in @cond.
*/
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
cpu_relax(); \
} \
smp_acquire__after_ctrl_dep(); \
VAL; \
})
#endif
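Usage sketch: spin until another CPU publishes a non-zero value, acquiring that CPU's prior stores ("node" is an assumed shared structure):

u32 seq = smp_cond_load_acquire(&node->seq, VAL != 0);
/* stores made before the writer's release of node->seq are now visible */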
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */


@@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t;
(__force u64)(__ct)
#define nsecs_to_cputime(__nsecs) \
(__force cputime_t)(__nsecs)
#define nsecs_to_cputime64(__nsecs) \
(__force cputime64_t)(__nsecs)
/*


@@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readl_relaxed readl
#endif
#ifndef readq_relaxed
#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif
@@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define writel_relaxed writel
#endif
#ifndef writeq_relaxed
#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif
@@ -585,6 +585,16 @@ static inline u32 ioread32(const volatile void __iomem *addr)
}
#endif
#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */
#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
@@ -609,11 +619,21 @@ static inline void iowrite32(u32 value, volatile void __iomem *addr)
}
#endif
#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */
#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
return __be16_to_cpu(__raw_readw(addr));
return swab16(readw(addr));
}
#endif
@@ -621,15 +641,25 @@ static inline u16 ioread16be(const volatile void __iomem *addr)
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
return __be32_to_cpu(__raw_readl(addr));
return swab32(readl(addr));
}
#endif
#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */
#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, void volatile __iomem *addr)
{
__raw_writew(__cpu_to_be16(value), addr);
writew(swab16(value), addr);
}
#endif
@@ -637,10 +667,20 @@ static inline void iowrite16be(u16 value, void volatile __iomem *addr)
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
__raw_writel(__cpu_to_be32(value), addr);
writel(swab32(value), addr);
}
#endif
#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */
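Usage sketch for the new 64-bit big-endian accessors ("base" is an assumed ioremap()ed register block and the offset is a placeholder):

u64 cap = ioread64be(base + 0x00);
iowrite64be(cap | 0x1, base + 0x00);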
#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
@@ -668,6 +708,17 @@ static inline void ioread32_rep(const volatile void __iomem *addr,
}
#endif
#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
void *buffer, unsigned int count)
{
readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
@@ -697,6 +748,18 @@ static inline void iowrite32_rep(volatile void __iomem *addr,
writesl(addr, buffer, count);
}
#endif
#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
const void *buffer,
unsigned int count)
{
writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
#ifdef __KERNEL__


@@ -30,12 +30,20 @@ extern unsigned int ioread16(void __iomem *);
extern unsigned int ioread16be(void __iomem *);
extern unsigned int ioread32(void __iomem *);
extern unsigned int ioread32be(void __iomem *);
#ifdef CONFIG_64BIT
extern u64 ioread64(void __iomem *);
extern u64 ioread64be(void __iomem *);
#endif
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
extern void iowrite32(u32, void __iomem *);
extern void iowrite32be(u32, void __iomem *);
#ifdef CONFIG_64BIT
extern void iowrite64(u64, void __iomem *);
extern void iowrite64be(u64, void __iomem *);
#endif
/*
* "string" versions of the above. Note that they


@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
return 1;
return 0;
}
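The pattern introduced above (a plain load before the atomic RMW) keeps a visibly-taken lock's cacheline shared instead of dirtying it with a doomed cmpxchg; distilled:

static inline bool example_trylock(atomic_t *count)
{
	return atomic_read(count) == 1 &&
	       atomic_cmpxchg_acquire(count, 1, 0) == 1;
}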


@@ -91,8 +91,12 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
int prev = atomic_xchg_acquire(count, 0);
int prev;
if (atomic_read(count) != 1)
return 0;
prev = atomic_xchg_acquire(count, 0);
if (unlikely(prev < 0)) {
/*
* The lock was marked contended so we must restore that


@@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd)
#define io_remap_pfn_range remap_pfn_range
#endif
#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif
#endif /* _ASM_GENERIC_PGTABLE_H */


@@ -7,10 +7,10 @@
static __always_inline int preempt_count(void)
{
return current_thread_info()->preempt_count;
return READ_ONCE(current_thread_info()->preempt_count);
}
static __always_inline int *preempt_count_ptr(void)
static __always_inline volatile int *preempt_count_ptr(void)
{
return &current_thread_info()->preempt_count;
}


@@ -25,7 +25,20 @@
#include <asm-generic/qrwlock_types.h>
/*
* Writer states & reader shift and bias
* Writer states & reader shift and bias.
*
* | +0 | +1 | +2 | +3 |
* ----+----+----+----+----+
* LE | 78 | 56 | 34 | 12 | 0x12345678
* ----+----+----+----+----+
* | wr | rd |
* +----+----+----+----+
*
* ----+----+----+----+----+
* BE | 12 | 34 | 56 | 78 | 0x12345678
* ----+----+----+----+----+
* | rd | wr |
* +----+----+----+----+
*/
#define _QW_WAITING 1 /* A writer is waiting */
#define _QW_LOCKED 0xff /* A writer holds the lock */
@@ -133,13 +146,23 @@ static inline void queued_read_unlock(struct qrwlock *lock)
(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
/**
* __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: the write byte address of a queue rwlock
*/
static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
{
return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
}
/**
* queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
smp_store_release((u8 *)&lock->cnts, 0);
smp_store_release(__qrwlock_write_byte(lock), 0);
}
/*


@@ -21,15 +21,34 @@
#include <asm-generic/qspinlock_types.h>
/**
* queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
* @lock : Pointer to queued spinlock structure
*
* There is a very slight possibility of live-lock if the lockers keep coming
* and the waiter is just unfortunate enough to not see any unlock state.
*/
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
* See queued_spin_unlock_wait().
*
* Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
* isn't immediately observable.
*/
return atomic_read(&lock->val);
}
#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -92,26 +111,12 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
/*
* smp_mb__before_atomic() in order to guarantee release semantics
* unlock() needs release semantics:
*/
smp_mb__before_atomic();
atomic_sub(_Q_LOCKED_VAL, &lock->val);
(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
}
#endif
/**
* queued_spin_unlock_wait - wait until current lock holder releases the lock
* @lock : Pointer to queued spinlock structure
*
* There is a very slight possibility of live-lock if the lockers keep coming
* and the waiter is just unfortunate enough to not see any unlock state.
*/
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
cpu_relax();
}
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{


@@ -1,247 +0,0 @@
/*
* include/asm-generic/rtc.h
*
* Author: Tom Rini <trini@mvista.com>
*
* Based on:
* drivers/char/rtc.c
*
* Please read the COPYING file for all license details.
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
/* some dummy definitions */
#define RTC_BATT_BAD 0x100 /* battery bad */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
/*
* Returns true if a clock update is in progress
*/
static inline unsigned char rtc_is_updating(void)
{
unsigned char uip;
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
spin_unlock_irqrestore(&rtc_lock, flags);
return uip;
}
static inline unsigned int __get_rtc_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
unsigned char century = 0;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
#endif
/*
* read RTC once any update in progress is done. The update
* can take just over 2ms. We wait 20ms. There is no need to
* to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
* periodic update complete interrupts, (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
if (rtc_is_updating())
mdelay(20);
/*
* Only the values that we read from the RTC are set. We leave
* tm_wday, tm_yday and tm_isdst untouched. Even though the
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
spin_lock_irqsave(&rtc_lock, flags);
time->tm_sec = CMOS_READ(RTC_SECONDS);
time->tm_min = CMOS_READ(RTC_MINUTES);
time->tm_hour = CMOS_READ(RTC_HOURS);
time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
time->tm_mon = CMOS_READ(RTC_MONTH);
time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century)
century = CMOS_READ(acpi_gbl_FADT.century);
#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
time->tm_sec = bcd2bin(time->tm_sec);
time->tm_min = bcd2bin(time->tm_min);
time->tm_hour = bcd2bin(time->tm_hour);
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
century = bcd2bin(century);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
if (century)
time->tm_year += (century - 19) * 100;
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
*/
if (time->tm_year <= 69)
time->tm_year += 100;
time->tm_mon--;
return RTC_24H;
}
#ifndef get_rtc_time
#define get_rtc_time __get_rtc_time
#endif
/* Set the current date and time in the real time clock. */
static inline int __set_rtc_time(struct rtc_time *time)
{
unsigned long flags;
unsigned char mon, day, hrs, min, sec;
unsigned char save_control, save_freq_select;
unsigned int yrs;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
unsigned char century = 0;
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
day = time->tm_mday;
hrs = time->tm_hour;
min = time->tm_min;
sec = time->tm_sec;
if (yrs > 255) /* They are unsigned */
return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
#ifdef CONFIG_MACH_DECSTATION
real_yrs = yrs;
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
!((yrs + 1900) % 400));
yrs = 72;
/*
* We want to keep the year set to 73 until March
* for non-leap years, so that Feb, 29th is handled
* correctly.
*/
if (!leap_yr && mon < 3) {
real_yrs--;
yrs = 73;
}
#endif
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century) {
century = (yrs + 1900) / 100;
yrs %= 100;
}
#endif
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
if (yrs > 169) {
spin_unlock_irqrestore(&rtc_lock, flags);
return -EINVAL;
}
if (yrs >= 100)
yrs -= 100;
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
|| RTC_ALWAYS_BCD) {
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
century = bin2bcd(century);
}
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
#endif
CMOS_WRITE(yrs, RTC_YEAR);
CMOS_WRITE(mon, RTC_MONTH);
CMOS_WRITE(day, RTC_DAY_OF_MONTH);
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century)
CMOS_WRITE(century, acpi_gbl_FADT.century);
#endif
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
#ifndef set_rtc_time
#define set_rtc_time __set_rtc_time
#endif
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
#endif /* __ASM_RTC_H__ */


@@ -41,8 +41,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
{
long tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg_acquire(&sem->count, tmp,
while ((tmp = atomic_long_read(&sem->count)) >= 0) {
if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
return 1;
}
@@ -53,7 +53,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
static inline void __down_write(struct rw_semaphore *sem)
{
long tmp;
@@ -63,16 +63,23 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
rwsem_down_write_failed(sem);
}
static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_killable(struct rw_semaphore *sem)
{
__down_write_nested(sem, 0);
long tmp;
tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count);
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
return 0;
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
long tmp;
tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
@@ -99,14 +106,6 @@ static inline void __up_write(struct rw_semaphore *sem)
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
atomic_long_add(delta, (atomic_long_t *)&sem->count);
}
/*
* downgrade write lock to read lock
*/
@@ -127,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
}
/*
* implement exchange and add functionality
*/
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_RWSEM_H */


@@ -29,4 +29,18 @@
#define __NR_seccomp_sigreturn __NR_rt_sigreturn
#endif
#ifdef CONFIG_COMPAT
#ifndef get_compat_mode1_syscalls
static inline const int *get_compat_mode1_syscalls(void)
{
static const int mode1_syscalls_32[] = {
__NR_seccomp_read_32, __NR_seccomp_write_32,
__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
0, /* null terminated */
};
return mode1_syscalls_32;
}
#endif
#endif /* CONFIG_COMPAT */
#endif /* _ASM_GENERIC_SECCOMP_H */
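Sketch of how a strict-mode filter might consult the null-terminated compat list:

static bool example_mode1_allowed_compat(int this_syscall)
{
	const int *nr = get_compat_mode1_syscalls();

	while (*nr)
		if (*nr++ == this_syscall)
			return true;
	return false;
}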


@@ -17,21 +17,6 @@
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
#ifndef HAVE_ARCH_COPY_SIGINFO
#include <linux/string.h>
static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
{
if (from->si_code < 0)
memcpy(to, from, sizeof(*to));
else
/* _sigchld is currently the largest know union member */
memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
}
#endif
extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif


@@ -107,6 +107,12 @@ struct mmu_gather {
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
unsigned int batch_count;
/*
* __tlb_adjust_range will track the new addr here,
* so that we can adjust the range after the flush
*/
unsigned long addr;
int page_size;
};
#define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +121,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/* tlb_remove_page
* Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
* required.
*/
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (!__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb);
}
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address)
{
tlb->start = min(tlb->start, address);
tlb->end = max(tlb->end, address + PAGE_SIZE);
/*
* Track the last address with which we adjusted the range. This
* will be used later to adjust again after a mmu_flush due to
* failed __tlb_remove_page
*/
tlb->addr = address;
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +147,40 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
}
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
if (__tlb_remove_page_size(tlb, page, page_size)) {
tlb_flush_mmu(tlb);
tlb->page_size = page_size;
__tlb_adjust_range(tlb, tlb->addr);
__tlb_remove_page_size(tlb, page, page_size);
}
}
static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
/* tlb_remove_page
* Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
* required.
*/
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
{
/* active->nr should be zero when we call this */
VM_BUG_ON_PAGE(tlb->active->nr, page);
tlb->page_size = PAGE_SIZE;
__tlb_adjust_range(tlb, tlb->addr);
return __tlb_remove_page(tlb, page);
}
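For context, these helpers back the usual unmap pattern; a rough sketch of the caller side (the page walk is a placeholder; zap paths in mm/memory.c follow this shape):

struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, start, end);
while ((page = next_page_to_unmap()) != NULL) {	/* placeholder walk */
	/* batches the page; tlb_remove_page() flushes internally once
	 * the batch fills, using tlb->addr to re-adjust the range */
	tlb_remove_page(&tlb, page);
}
tlb_finish_mmu(&tlb, start, end);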
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,


@@ -72,6 +72,7 @@ struct exception_table_entry
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
/*
* architectures with an MMU should override these two
*/
@@ -230,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) *)__p) : \
-EFAULT; \
((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
size = __copy_from_user(x, ptr, size);
return size ? -EFAULT : size;
size_t n = __copy_from_user(x, ptr, size);
if (unlikely(n)) {
memset(x + (size - n), 0, n);
return -EFAULT;
}
return 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -257,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
unsigned long res = n;
might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
else
return n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
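The memset matters because a short copy must not leak stale kernel memory; a minimal sketch of what callers can now rely on (buffer names are illustrative):

char kbuf[64];
unsigned long left = copy_from_user(kbuf, ubuf, sizeof(kbuf));

if (left) {
	/* the copied prefix is valid and the uncopied tail of kbuf is
	 * zeroed, so kbuf never exposes uninitialized kernel data */
	return -EFAULT;
}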
static inline long copy_to_user(void __user *to,


@@ -164,7 +164,7 @@
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name)
#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
@@ -245,7 +245,17 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
*(.data..init_task)
VMLINUX_SYMBOL(__start_init_task) = .; \
*(.data..init_task) \
VMLINUX_SYMBOL(__end_init_task) = .;
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
#endif
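RO_AFTER_INIT_DATA collects objects annotated with __ro_after_init (from <linux/cache.h>); a minimal usage sketch, with a hypothetical helper:

#include <linux/cache.h>

/* written exactly once during init, read-only afterwards */
static unsigned long table_size __ro_after_init;

static int __init my_driver_init(void)
{
	table_size = compute_table_size();	/* hypothetical */
	return 0;
}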
/*
* Read only Data
@@ -255,7 +265,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
*(.data..ro_after_init) /* Read only after init */ \
RO_AFTER_INIT_DATA /* Read only after init */ \
*(__vermagic) /* Kernel version magic */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
@@ -540,15 +550,19 @@
#define INIT_TEXT \
*(.init.text) \
*(.text.startup) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
*(.fini_array) \
*(.dtors) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
*(.text.exit) \
MEM_DISCARD(exit.text)
#define EXIT_CALL \


@@ -49,11 +49,16 @@ enum arch_timer_reg {
#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */
struct arch_timer_kvm_info {
struct timecounter timecounter;
int virtual_irq;
};
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
extern u64 (*arch_timer_read_counter)(void);
extern struct timecounter *arch_timer_get_timecounter(void);
extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
#else
@@ -67,11 +72,6 @@ static inline u64 arch_timer_read_counter(void)
return 0;
}
static inline struct timecounter *arch_timer_get_timecounter(void)
{
return NULL;
}
#endif
#endif


@@ -3,10 +3,10 @@
struct clk;
void __sp804_clocksource_and_sched_clock_init(void __iomem *,
const char *, struct clk *, int);
void __sp804_clockevents_init(void __iomem *, unsigned int,
struct clk *, const char *);
int __sp804_clocksource_and_sched_clock_init(void __iomem *,
const char *, struct clk *, int);
int __sp804_clockevents_init(void __iomem *, unsigned int,
struct clk *, const char *);
void sp804_timer_disable(void __iomem *);
static inline void sp804_clocksource_init(void __iomem *base, const char *name)


@@ -112,11 +112,12 @@ struct aead_request {
* supplied during the decryption operation. This function is also
* responsible for checking the authentication tag size for
* validity.
* @setkey: see struct ablkcipher_alg
* @encrypt: see struct ablkcipher_alg
* @decrypt: see struct ablkcipher_alg
* @geniv: see struct ablkcipher_alg
* @ivsize: see struct ablkcipher_alg
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
* @geniv: see struct skcipher_alg
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -145,6 +146,7 @@ struct aead_alg {
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
struct crypto_alg base;
};
@@ -405,8 +407,7 @@ static inline void aead_request_set_tfm(struct aead_request *req,
* encrypt and decrypt API calls. During the allocation, the provided aead
* handle is registered in the request data structure.
*
* Return: allocated request handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
gfp_t gfp)


@@ -244,6 +244,8 @@ static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
}
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
@@ -440,8 +442,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
cond_resched();
#endif
}
#endif /* _CRYPTO_ALGAPI_H */


@@ -16,6 +16,7 @@ struct chacha20_ctx {
u32 key[8];
};
void chacha20_block(u32 *state, void *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keysize);


@@ -31,6 +31,7 @@ static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
u32 type, u32 mask);
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
struct cryptd_ahash {
@@ -48,6 +49,8 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
/* Must be called without moving CPUs. */
bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
void cryptd_free_ahash(struct cryptd_ahash *tfm);
struct cryptd_aead {
@@ -64,6 +67,8 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
u32 type, u32 mask);
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
/* Must be called without moving CPUs. */
bool cryptd_aead_queued(struct cryptd_aead *tfm);
void cryptd_free_aead(struct cryptd_aead *tfm);

include/crypto/dh.h

@@ -0,0 +1,29 @@
/*
* Diffie-Hellman secret to be used with kpp API along with helper functions
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_DH_
#define _CRYPTO_DH_
struct dh {
void *key;
void *p;
void *g;
unsigned int key_size;
unsigned int p_size;
unsigned int g_size;
};
int crypto_dh_key_len(const struct dh *params);
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
#endif
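A sketch of how these helpers pack parameters for crypto_kpp_set_secret(); buffers are assumed to hold raw big-endian values and error handling is abbreviated:

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

static int dh_set_params(struct crypto_kpp *tfm, const struct dh *params)
{
	int len = crypto_dh_key_len(params);
	void *buf = kmalloc(len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;
	/* prefixes the buffer with a struct kpp_secret header */
	err = crypto_dh_encode_key(buf, len, params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);
	kzfree(buf);
	return err;
}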


@@ -43,6 +43,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
@@ -107,14 +108,25 @@ struct drbg_test_data {
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
unsigned char *Vbuf;
/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
unsigned char *C;
unsigned char *Cbuf;
/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
size_t reseed_ctr;
size_t reseed_threshold;
/* some memory the DRBG can use for its operation */
unsigned char *scratchpad;
unsigned char *scratchpadbuf;
void *priv_data; /* Cipher handle */
struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
struct skcipher_request *ctr_req; /* CTR mode request handle */
__u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
__u8 *ctr_null_value; /* CTR mode aligned zero buf */
struct completion ctr_completion; /* CTR mode async handler */
int ctr_async_err; /* CTR mode async error */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
struct work_struct seed_work; /* asynchronous seeding support */

include/crypto/ecdh.h

@@ -0,0 +1,30 @@
/*
* ECDH params to be used with kpp API
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_ECDH_
#define _CRYPTO_ECDH_
/* Curves IDs */
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
struct ecdh {
unsigned short curve_id;
char *key;
unsigned short key_size;
};
int crypto_ecdh_key_len(const struct ecdh *params);
int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
#endif


@@ -547,8 +547,7 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
* the allocation, the provided ahash handle
* is registered in the request data structure.
*
* Return: allocated request handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)


@@ -159,6 +159,27 @@ static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
return req ? container_of(req, struct aead_request, base) : NULL;
}
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
{
return alg->chunksize;
}
/**
* crypto_aead_chunksize() - obtain chunk size
* @tfm: cipher handle
*
* The block size is set to one for ciphers such as CCM. However,
* you still need to provide incremental updates in multiples of
* the underlying block size as the IV does not have sub-block
* granularity. This is known in this API as the chunk size.
*
* Return: chunk size in bytes
*/
static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm)
{
return crypto_aead_alg_chunksize(crypto_aead_alg(tfm));
}
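As an illustration, for a CTR-based AEAD the chunk size is the underlying cipher's block size even though the advertised block size is one; a hedged sketch (assumes the implementation fills in @chunksize; error checks omitted):

struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);

unsigned int bs = crypto_aead_blocksize(tfm);	/* 1 for CCM */
unsigned int cs = crypto_aead_chunksize(tfm);	/* AES block, i.e. 16 */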
int crypto_register_aead(struct aead_alg *alg);
void crypto_unregister_aead(struct aead_alg *alg);
int crypto_register_aeads(struct aead_alg *algs, int count);


@@ -20,7 +20,7 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
struct crypto_blkcipher *null;
struct crypto_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};


@@ -114,14 +114,10 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_mcryptd_update(struct ahash_request *req,
struct shash_desc *desc);
int shash_ahash_mcryptd_final(struct ahash_request *req,
struct shash_desc *desc);
int shash_ahash_mcryptd_finup(struct ahash_request *req,
struct shash_desc *desc);
int shash_ahash_mcryptd_digest(struct ahash_request *req,
struct shash_desc *desc);
int ahash_mcryptd_update(struct ahash_request *desc);
int ahash_mcryptd_final(struct ahash_request *desc);
int ahash_mcryptd_finup(struct ahash_request *desc);
int ahash_mcryptd_digest(struct ahash_request *desc);
int crypto_init_shash_ops_async(struct crypto_tfm *tfm);


@@ -0,0 +1,64 @@
/*
* Key-agreement Protocol Primitives (KPP)
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_KPP_INT_H
#define _CRYPTO_KPP_INT_H
#include <crypto/kpp.h>
#include <crypto/algapi.h>
/*
* Transform internal helpers.
*/
static inline void *kpp_request_ctx(struct kpp_request *req)
{
return req->__ctx;
}
static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
{
return tfm->base.__crt_ctx;
}
static inline void kpp_request_complete(struct kpp_request *req, int err)
{
req->base.complete(&req->base, err);
}
static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
{
return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
}
/**
* crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
*
* Function registers an implementation of a key-agreement protocol primitive
* algorithm
*
* @alg: algorithm definition
*
* Return: zero on success; error code in case of error
*/
int crypto_register_kpp(struct kpp_alg *alg);
/**
* crypto_unregister_kpp() -- Unregister key-agreement protocol primitive
* algorithm
*
* Function unregisters an implementation of a key-agreement protocol primitive
* algorithm
*
* @alg: algorithm definition
*/
void crypto_unregister_kpp(struct kpp_alg *alg);
#endif


@@ -12,12 +12,44 @@
*/
#ifndef _RSA_HELPER_
#define _RSA_HELPER_
#include <linux/mpi.h>
#include <linux/types.h>
/**
* rsa_key - RSA key structure
* @n : RSA modulus raw byte stream
* @e : RSA public exponent raw byte stream
* @d : RSA private exponent raw byte stream
* @p : RSA prime factor p of n raw byte stream
* @q : RSA prime factor q of n raw byte stream
* @dp : RSA exponent d mod (p - 1) raw byte stream
* @dq : RSA exponent d mod (q - 1) raw byte stream
* @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream
* @n_sz : length in bytes of RSA modulus n
* @e_sz : length in bytes of RSA public exponent
* @d_sz : length in bytes of RSA private exponent
* @p_sz : length in bytes of p field
* @q_sz : length in bytes of q field
* @dp_sz : length in bytes of dp field
* @dq_sz : length in bytes of dq field
* @qinv_sz : length in bytes of qinv field
*/
struct rsa_key {
MPI n;
MPI e;
MPI d;
const u8 *n;
const u8 *e;
const u8 *d;
const u8 *p;
const u8 *q;
const u8 *dp;
const u8 *dq;
const u8 *qinv;
size_t n_sz;
size_t e_sz;
size_t d_sz;
size_t p_sz;
size_t q_sz;
size_t dp_sz;
size_t dq_sz;
size_t qinv_sz;
};
int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
@@ -26,7 +58,5 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len);
void rsa_free_key(struct rsa_key *rsa_key);
extern struct crypto_template rsa_pkcs1pad_tmpl;
#endif


@@ -19,12 +19,46 @@
struct rtattr;
struct skcipher_instance {
void (*free)(struct skcipher_instance *inst);
union {
struct {
char head[offsetof(struct skcipher_alg, base)];
struct crypto_instance base;
} s;
struct skcipher_alg alg;
};
};
struct crypto_skcipher_spawn {
struct crypto_spawn base;
};
extern const struct crypto_type crypto_givcipher_type;
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
return &inst->s.base;
}
static inline struct skcipher_instance *skcipher_alg_instance(
struct crypto_skcipher *skcipher)
{
return container_of(crypto_skcipher_alg(skcipher),
struct skcipher_instance, alg);
}
static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
return crypto_instance_ctx(skcipher_crypto_instance(inst));
}
static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
req->base.complete(&req->base, err);
}
static inline void crypto_set_skcipher_spawn(
struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
{
@@ -34,6 +68,12 @@ static inline void crypto_set_skcipher_spawn(
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
u32 type, u32 mask);
static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
const char *name, u32 type, u32 mask)
{
return crypto_grab_skcipher(spawn, name, type, mask);
}
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
@@ -41,54 +81,42 @@ static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
crypto_drop_spawn(&spawn->base);
}
static inline struct crypto_alg *crypto_skcipher_spawn_alg(
static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
struct crypto_skcipher_spawn *spawn)
{
return spawn->base.alg;
return container_of(spawn->base.alg, struct skcipher_alg, base);
}
static inline struct crypto_ablkcipher *crypto_spawn_skcipher(
static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
struct crypto_skcipher_spawn *spawn)
{
return __crypto_ablkcipher_cast(
crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0),
crypto_skcipher_mask(0)));
return crypto_skcipher_spawn_alg(spawn);
}
int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);
const char *crypto_default_geniv(const struct crypto_alg *alg);
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type,
u32 mask);
void skcipher_geniv_free(struct crypto_instance *inst);
int skcipher_geniv_init(struct crypto_tfm *tfm);
void skcipher_geniv_exit(struct crypto_tfm *tfm);
static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
struct crypto_ablkcipher *geniv)
static inline struct crypto_skcipher *crypto_spawn_skcipher(
struct crypto_skcipher_spawn *spawn)
{
return crypto_ablkcipher_crt(geniv)->base;
return crypto_spawn_tfm2(&spawn->base);
}
static inline int skcipher_enqueue_givcrypt(
struct crypto_queue *queue, struct skcipher_givcrypt_request *request)
static inline struct crypto_skcipher *crypto_spawn_skcipher2(
struct crypto_skcipher_spawn *spawn)
{
return ablkcipher_enqueue_request(queue, &request->creq);
return crypto_spawn_skcipher(spawn);
}
static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
struct crypto_queue *queue)
static inline void crypto_skcipher_set_reqsize(
struct crypto_skcipher *skcipher, unsigned int reqsize)
{
return skcipher_givcrypt_cast(crypto_dequeue_request(queue));
skcipher->reqsize = reqsize;
}
static inline void *skcipher_givcrypt_reqctx(
struct skcipher_givcrypt_request *req)
{
return ablkcipher_request_ctx(&req->creq);
}
int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst);
static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
int err)
@@ -96,12 +124,6 @@ static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
req->base.complete(&req->base, err);
}
static inline void skcipher_givcrypt_complete(
struct skcipher_givcrypt_request *req, int err)
{
ablkcipher_request_complete(&req->creq, err);
}
static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
{
return req->base.flags;
@@ -122,5 +144,31 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
return req->base.flags;
}
static inline unsigned int crypto_skcipher_alg_min_keysize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blkcipher.min_keysize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_ablkcipher.min_keysize;
return alg->min_keysize;
}
static inline unsigned int crypto_skcipher_alg_max_keysize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blkcipher.max_keysize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_ablkcipher.max_keysize;
return alg->max_keysize;
}
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */

include/crypto/kpp.h

@@ -0,0 +1,330 @@
/*
* Key-agreement Protocol Primitives (KPP)
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_KPP_
#define _CRYPTO_KPP_
#include <linux/crypto.h>
/**
* struct kpp_request
*
* @base: Common attributes for async crypto requests
* @src: Source data
* @dst: Destination data
* @src_len: Size of the input buffer
* @dst_len: Size of the output buffer. It needs to be at least
* as big as the expected result, depending on the operation.
* After the operation it will be updated with the actual size of the
* result. In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
* @__ctx: Start of private context data
*/
struct kpp_request {
struct crypto_async_request base;
struct scatterlist *src;
struct scatterlist *dst;
unsigned int src_len;
unsigned int dst_len;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct crypto_kpp - user-instantiated object which encapsulate
* algorithms and core processing logic
*
* @base: Common crypto API algorithm data structure
*/
struct crypto_kpp {
struct crypto_tfm base;
};
/**
* struct kpp_alg - generic key-agreement protocol primitives
*
* @set_secret: Function invokes the protocol specific function to
* store the secret private key along with parameters.
* The implementation knows how to decode this buffer
* @generate_public_key: Function generates the public key to be sent to the
* counterpart. In case of error, where the output is not big
* enough, req->dst_len will be updated to the size
* required
* @compute_shared_secret: Function computes the shared secret as defined by
* the algorithm. The result is given back to the user.
* In case of error, where output is not big enough,
* req->dst_len will be updated to the size required
* @max_size: Function returns the size of the output buffer
* @init: Initialize the object. This is called only once at
* instantiation time, in case the cryptographic hardware
* needs to be initialized. Any software fallback should be
* put in place here.
* @exit: Undo everything @init did.
*
* @reqsize: Request context size required by algorithm
* implementation
* @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
unsigned int len);
int (*generate_public_key)(struct kpp_request *req);
int (*compute_shared_secret)(struct kpp_request *req);
int (*max_size)(struct crypto_kpp *tfm);
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
unsigned int reqsize;
struct crypto_alg base;
};
/**
* DOC: Generic Key-agreement Protocol Primitives API
*
* The KPP API is used with the algorithm type
* CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
*/
/**
* crypto_alloc_kpp() - allocate KPP tfm handle
* @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh")
* @type: specifies the type of the algorithm
* @mask: specifies the mask for the algorithm
*
* Allocate a handle for a kpp algorithm. The returned struct crypto_kpp
* is required for any subsequent API invocation.
*
* Return: allocated handle in case of success; IS_ERR() is true in case of
* an error, PTR_ERR() returns the error code.
*/
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
{
return &tfm->base;
}
static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct kpp_alg, base);
}
static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_kpp, base);
}
static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
{
return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg);
}
static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
{
return crypto_kpp_alg(tfm)->reqsize;
}
static inline void kpp_request_set_tfm(struct kpp_request *req,
struct crypto_kpp *tfm)
{
req->base.tfm = crypto_kpp_tfm(tfm);
}
static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
{
return __crypto_kpp_tfm(req->base.tfm);
}
/**
* crypto_free_kpp() - free KPP tfm handle
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
*/
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
{
crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm));
}
/**
* kpp_request_alloc() - allocates kpp request
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
* @gfp: allocation flags
*
* Return: allocated handle in case of success or NULL in case of an error.
*/
static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm,
gfp_t gfp)
{
struct kpp_request *req;
req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp);
if (likely(req))
kpp_request_set_tfm(req, tfm);
return req;
}
/**
* kpp_request_free() - zeroize and free kpp request
*
* @req: request to free
*/
static inline void kpp_request_free(struct kpp_request *req)
{
kzfree(req);
}
/**
* kpp_request_set_callback() - Sets an asynchronous callback.
*
* Callback will be called when an asynchronous operation on a given
* request is finished.
*
* @req: request that the callback will be set for
* @flgs: specify for instance if the operation may backlog
* @cmpl: callback which will be called
* @data: private data used by the caller
*/
static inline void kpp_request_set_callback(struct kpp_request *req,
u32 flgs,
crypto_completion_t cmpl,
void *data)
{
req->base.complete = cmpl;
req->base.data = data;
req->base.flags = flgs;
}
/**
* kpp_request_set_input() - Sets input buffer
*
* Sets parameters required by generate_public_key
*
* @req: kpp request
* @input: ptr to input scatter list
* @input_len: size of the input scatter list
*/
static inline void kpp_request_set_input(struct kpp_request *req,
struct scatterlist *input,
unsigned int input_len)
{
req->src = input;
req->src_len = input_len;
}
/**
* kpp_request_set_output() - Sets output buffer
*
* Sets parameters required by kpp operation
*
* @req: kpp request
* @output: ptr to output scatter list
* @output_len: size of the output scatter list
*/
static inline void kpp_request_set_output(struct kpp_request *req,
struct scatterlist *output,
unsigned int output_len)
{
req->dst = output;
req->dst_len = output_len;
}
enum {
CRYPTO_KPP_SECRET_TYPE_UNKNOWN,
CRYPTO_KPP_SECRET_TYPE_DH,
CRYPTO_KPP_SECRET_TYPE_ECDH,
};
/**
* struct kpp_secret - small header for packing secret buffer
*
* @type: defines the type of secret; each kpp type will define its own
* @len: specifies the length of the secret, including the header, that
* follows the struct
*/
struct kpp_secret {
unsigned short type;
unsigned short len;
};
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
* Function invokes the specific kpp operation for a given alg.
*
* @tfm: tfm handle
* @buffer: Buffer holding the packed secret (e.g. from crypto_dh_encode_key())
* @len: Length of the packed secret
*
* Return: zero on success; error code in case of error
*/
static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
return alg->set_secret(tfm, buffer, len);
}
/**
* crypto_kpp_generate_public_key() - Invoke kpp operation
*
* Function invokes the specific kpp operation for generating the public part
* for a given kpp algorithm
*
* @req: kpp key request
*
* Return: zero on success; error code in case of error
*/
static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
return alg->generate_public_key(req);
}
/**
* crypto_kpp_compute_shared_secret() - Invoke kpp operation
*
* Function invokes the specific kpp operation for computing the shared secret
* for a given kpp algorithm.
*
* @req: kpp key request
*
* Return: zero on success; error code in case of error
*/
static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
return alg->compute_shared_secret(req);
}
/**
* crypto_kpp_maxsize() - Get len for output buffer
*
* Function returns the output buffer size required
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
*
* Return: minimum len for output buffer or error code if key hasn't been set
*/
static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
return alg->max_size(tfm);
}
#endif
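Putting the pieces together, one side of a DH exchange through this API might look as follows; a sketch assuming a synchronous implementation (asynchronous ones return -EINPROGRESS and finish via the request callback):

#include <crypto/kpp.h>
#include <linux/scatterlist.h>

static int dh_shared_secret(void *packed_secret, unsigned int secret_len,
			    struct scatterlist *peer_pub, unsigned int pub_len,
			    struct scatterlist *out, unsigned int out_len)
{
	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
	struct kpp_request *req;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_kpp(tfm);
		return -ENOMEM;
	}
	err = crypto_kpp_set_secret(tfm, packed_secret, secret_len);
	if (!err) {
		/* feed the peer's public value, collect the shared secret */
		kpp_request_set_input(req, peer_pub, pub_len);
		kpp_request_set_output(req, out, out_len);
		err = crypto_kpp_compute_shared_secret(req);
	}
	kpp_request_free(req);
	crypto_free_kpp(tfm);
	return err;
}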


@@ -39,7 +39,7 @@ struct mcryptd_instance_ctx {
};
struct mcryptd_hash_ctx {
struct crypto_shash *child;
struct crypto_ahash *child;
struct mcryptd_alg_state *alg_state;
};
@@ -59,13 +59,13 @@ struct mcryptd_hash_request_ctx {
struct crypto_hash_walk walk;
u8 *out;
int flag;
struct shash_desc desc;
struct ahash_request areq;
};
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask);
struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
struct shash_desc *mcryptd_shash_desc(struct ahash_request *req);
struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
void mcryptd_flusher(struct work_struct *work);


@@ -8,7 +8,17 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
struct crypto_blkcipher *crypto_get_default_null_skcipher(void);
struct crypto_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
{
return crypto_get_default_null_skcipher();
}
static inline void crypto_put_default_null_skcipher2(void)
{
crypto_put_default_null_skcipher();
}
#endif


@@ -12,6 +12,7 @@
#ifndef _CRYPTO_PKCS7_H
#define _CRYPTO_PKCS7_H
#include <linux/verification.h>
#include <crypto/public_key.h>
struct key;
@@ -26,14 +27,13 @@ extern void pkcs7_free_message(struct pkcs7_message *pkcs7);
extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
const void **_data, size_t *_datalen,
bool want_wrapper);
size_t *_headerlen);
/*
* pkcs7_trust.c
*/
extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
struct key *trust_keyring,
bool *_trusted);
struct key *trust_keyring);
/*
* pkcs7_verify.c


@@ -14,20 +14,6 @@
#ifndef _LINUX_PUBLIC_KEY_H
#define _LINUX_PUBLIC_KEY_H
/*
* The use to which an asymmetric key is being put.
*/
enum key_being_used_for {
VERIFYING_MODULE_SIGNATURE,
VERIFYING_FIRMWARE_SIGNATURE,
VERIFYING_KEXEC_PE_SIGNATURE,
VERIFYING_KEY_SIGNATURE,
VERIFYING_KEY_SELF_SIGNATURE,
VERIFYING_UNSPECIFIED_SIGNATURE,
NR__KEY_BEING_USED_FOR
};
extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
@@ -41,12 +27,13 @@ struct public_key {
const char *pkey_algo;
};
extern void public_key_destroy(void *payload);
extern void public_key_free(struct public_key *key);
/*
* Public key cryptography signature data
*/
struct public_key_signature {
struct asymmetric_key_id *auth_ids[2];
u8 *s; /* Signature */
u32 s_size; /* Number of bytes in signature */
u8 *digest;
@@ -55,17 +42,21 @@ struct public_key_signature {
const char *hash_algo;
};
extern void public_key_signature_free(struct public_key_signature *sig);
extern struct asymmetric_key_subtype public_key_subtype;
struct key;
struct key_type;
union key_payload;
extern int restrict_link_by_signature(struct key *trust_keyring,
const struct key_type *type,
const union key_payload *payload);
extern int verify_signature(const struct key *key,
const struct public_key_signature *sig);
struct asymmetric_key_id;
extern struct key *x509_request_asymmetric_key(struct key *keyring,
const struct asymmetric_key_id *id,
const struct asymmetric_key_id *skid,
bool partial);
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);


@@ -16,14 +16,10 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
#include <asm/kmap_types.h>
#include <crypto/algapi.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg,
@@ -83,17 +79,53 @@ static inline void scatterwalk_unmap(void *vaddr)
kunmap_atomic(vaddr);
}
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
static inline void scatterwalk_start(struct scatter_walk *walk,
struct scatterlist *sg)
{
walk->sg = sg;
walk->offset = sg->offset;
}
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
return kmap_atomic(scatterwalk_page(walk)) +
offset_in_page(walk->offset);
}
static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
unsigned int more)
{
if (out) {
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
/* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
* PageSlab cannot be optimised away per se due to
* use of volatile pointer.
*/
if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
flush_dcache_page(page);
}
if (more && walk->offset >= walk->sg->offset + walk->sg->length)
scatterwalk_start(walk, sg_next(walk->sg));
}
static inline void scatterwalk_done(struct scatter_walk *walk, int out,
int more)
{
if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
!(walk->offset & (PAGE_SIZE - 1)))
scatterwalk_pagedone(walk, out, more);
}
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk);
void scatterwalk_done(struct scatter_walk *walk, int out, int more);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out);
int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
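As a usage note, most callers only need the map-and-copy helper; e.g. pulling an authentication tag out of a scatterlist (offsets illustrative):

u8 tag[16];

/* out == 0 copies sg -> buffer; out == 1 copies buffer -> sg */
scatterwalk_map_and_copy(tag, req->src, req->cryptlen - sizeof(tag),
			 sizeof(tag), 0);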

include/crypto/sha3.h

@@ -0,0 +1,29 @@
/*
* Common values for SHA-3 algorithms
*/
#ifndef __CRYPTO_SHA3_H__
#define __CRYPTO_SHA3_H__
#define SHA3_224_DIGEST_SIZE (224 / 8)
#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
#define SHA3_256_DIGEST_SIZE (256 / 8)
#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
#define SHA3_384_DIGEST_SIZE (384 / 8)
#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
#define SHA3_512_DIGEST_SIZE (512 / 8)
#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
struct sha3_state {
u64 st[25];
unsigned int md_len;
unsigned int rsiz;
unsigned int rsizw;
unsigned int partial;
u8 buf[SHA3_224_BLOCK_SIZE];
};
#endif
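The block sizes are the Keccak sponge rate: the 200-byte (1600-bit) state minus the capacity, which is twice the digest size. A spot check (placed inside any function):

/* rate = state - capacity = 200 - 2 * digest */
BUILD_BUG_ON(SHA3_256_BLOCK_SIZE != 200 - 2 * 32);	/* 136 bytes */
BUILD_BUG_ON(SHA3_512_BLOCK_SIZE != 200 - 2 * 64);	/*  72 bytes */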


@@ -65,87 +65,81 @@ struct crypto_skcipher {
struct crypto_tfm base;
};
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MIN_KEY_SIZE" include/crypto/
* @max_keysize: Maximum key size supported by the transformation. This is the
* largest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MAX_KEY_SIZE" include/crypto/
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
* function does modify the transformation context. This function can
* be called multiple times during the existence of the transformation
* object, so one must make sure the key is properly reprogrammed into
* the hardware. This function is also responsible for checking the key
* length for validity. In case a software fallback was put in place in
* the @cra_init call, this function might need to use the fallback if
* the algorithm doesn't support all of the key sizes.
* @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
* the supplied scatterlist containing the blocks of data. The crypto
* API consumer is responsible for aligning the entries of the
* scatterlist properly and making sure the chunks are correctly
* sized. In case a software fallback was put in place in the
* @cra_init call, this function might need to use the fallback if
* the algorithm doesn't support all of the key sizes. In case the
* key was stored in transformation context, the key might need to be
* re-programmed into the hardware in this function. This function
* shall not modify the transformation context, as this function may
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
* after the transformation context was allocated. In case the
* cryptographic hardware has some special requirements which need to
* be handled by software, this function shall check for the precise
* requirement of the transformation and put any software fallbacks
* in place.
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
* @ivsize: IV size applicable for transformation. The consumer must provide an
* IV of exactly that size to perform the encrypt or decrypt operation.
* @chunksize: Equal to the block size except for stream ciphers such as
* CTR where it is set to the underlying block size.
* @base: Definition of a generic crypto algorithm.
*
* All fields except @ivsize are mandatory and must be filled.
*/
struct skcipher_alg {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
unsigned int chunksize;
struct crypto_alg base;
};
#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = (void *)__##name##_desc
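A typical synchronous use of the on-stack request, sketched under the assumption of a sync-only tfm (src/dst/iv supplied by the caller):

static int encrypt_buf(struct crypto_skcipher *tfm, struct scatterlist *src,
		       struct scatterlist *dst, unsigned int len, u8 *iv)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe the on-stack state */
	return err;
}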
static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
struct skcipher_givcrypt_request *req)
{
return crypto_ablkcipher_reqtfm(&req->creq);
}
static inline int crypto_skcipher_givencrypt(
struct skcipher_givcrypt_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
return crt->givencrypt(req);
};
static inline int crypto_skcipher_givdecrypt(
struct skcipher_givcrypt_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
return crt->givdecrypt(req);
};
static inline void skcipher_givcrypt_set_tfm(
struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm)
{
req->creq.base.tfm = crypto_ablkcipher_tfm(tfm);
}
static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast(
struct crypto_async_request *req)
{
return container_of(ablkcipher_request_cast(req),
struct skcipher_givcrypt_request, creq);
}
static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc(
struct crypto_ablkcipher *tfm, gfp_t gfp)
{
struct skcipher_givcrypt_request *req;
req = kmalloc(sizeof(struct skcipher_givcrypt_request) +
crypto_ablkcipher_reqsize(tfm), gfp);
if (likely(req))
skcipher_givcrypt_set_tfm(req, tfm);
return req;
}
static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
{
kfree(req);
}
static inline void skcipher_givcrypt_set_callback(
struct skcipher_givcrypt_request *req, u32 flags,
crypto_completion_t compl, void *data)
{
ablkcipher_request_set_callback(&req->creq, flags, compl, data);
}
static inline void skcipher_givcrypt_set_crypt(
struct skcipher_givcrypt_request *req,
struct scatterlist *src, struct scatterlist *dst,
unsigned int nbytes, void *iv)
{
ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv);
}
static inline void skcipher_givcrypt_set_giv(
struct skcipher_givcrypt_request *req, u8 *giv, u64 seq)
{
req->giv = giv;
req->seq = seq;
}
/**
* DOC: Symmetric Key Cipher API
*
@@ -231,12 +225,43 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type,
crypto_skcipher_mask(mask));
}
/**
* crypto_has_skcipher2() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
* @type: specifies the type of the skcipher
* @mask: specifies the mask for the skcipher
*
* Return: true when the skcipher is known to the kernel crypto API; false
* otherwise
*/
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask);
static inline const char *crypto_skcipher_driver_name(
struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
}
static inline struct skcipher_alg *crypto_skcipher_alg(
struct crypto_skcipher *tfm)
{
return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
struct skcipher_alg, base);
}
static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blkcipher.ivsize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_ablkcipher.ivsize;
return alg->ivsize;
}
/**
* crypto_skcipher_ivsize() - obtain IV size
* @tfm: cipher handle
@@ -251,6 +276,36 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
return tfm->ivsize;
}
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
return alg->chunksize;
}
/**
* crypto_skcipher_chunksize() - obtain chunk size
* @tfm: cipher handle
*
* The block size is set to one for ciphers such as CTR. However,
* you still need to provide incremental updates in multiples of
* the underlying block size as the IV does not have sub-block
* granularity. This is known in this API as the chunk size.
*
* Return: chunk size in bytes
*/
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
}
/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -425,8 +480,7 @@ static inline struct skcipher_request *skcipher_request_cast(
* encrypt and decrypt API calls. During the allocation, the provided skcipher
* handle is registered in the request data structure.
*
* Return: allocated request handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct skcipher_request *skcipher_request_alloc(
struct crypto_skcipher *tfm, gfp_t gfp)


@@ -0,0 +1,48 @@
/*
* Analogix DP (Display Port) Core interface driver.
*
* Copyright (C) 2015 Rockchip Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _ANALOGIX_DP_H_
#define _ANALOGIX_DP_H_
#include <drm/drm_crtc.h>
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
RK3399_EDP,
};
static inline bool is_rockchip(enum analogix_dp_devtype type)
{
return type == RK3288_DP || type == RK3399_EDP;
}
struct analogix_dp_plat_data {
enum analogix_dp_devtype dev_type;
struct drm_panel *panel;
struct drm_encoder *encoder;
struct drm_connector *connector;
int (*power_on)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
struct drm_connector *);
int (*get_modes)(struct analogix_dp_plat_data *,
struct drm_connector *);
};
int analogix_dp_resume(struct device *dev);
int analogix_dp_suspend(struct device *dev);
int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
struct analogix_dp_plat_data *plat_data);
void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
#endif /* _ANALOGIX_DP_H_ */
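A glue driver wires itself up through plat_data and the component framework; a rough sketch (the rockchip_* helpers are hypothetical):

static struct analogix_dp_plat_data rockchip_dp_plat_data = {
	.dev_type = RK3288_DP,
	.power_on = rockchip_dp_poweron,	/* hypothetical */
	.power_off = rockchip_dp_powerdown,	/* hypothetical */
};

static int rockchip_dp_bind(struct device *dev, struct device *master,
			    void *data)
{
	struct drm_device *drm_dev = data;

	return analogix_dp_bind(dev, drm_dev, &rockchip_dp_plat_data);
}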


@@ -56,6 +56,7 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/fence.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
@@ -66,6 +67,7 @@
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mem_util.h>
@@ -83,6 +85,8 @@ struct drm_local_map;
struct drm_device_dma;
struct drm_dma_handle;
struct drm_gem_object;
struct drm_master;
struct drm_vblank_crtc;
struct device_node;
struct videomode;
@@ -90,7 +94,7 @@ struct reservation_object;
struct dma_buf_attachment;
/*
* 4 debug categories are defined:
* The following categories are defined:
*
* CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
* This is the category used by the DRM_DEBUG() macro.
@@ -281,13 +285,14 @@ struct drm_ioctl_desc {
/* Event queued up for userspace to read */
struct drm_pending_event {
struct completion *completion;
struct drm_event *event;
struct fence *fence;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
pid_t pid; /* pid of requester, no guarantee it's valid by the time
we deliver the event, for tracing only */
void (*destroy)(struct drm_pending_event *event);
};
/* initial implementation using a linked list - todo hashtab */
@@ -299,8 +304,6 @@ struct drm_prime_file_private {
/** File private data */
struct drm_file {
unsigned authenticated :1;
/* Whether we're master for a minor. Protected by master_mutex */
unsigned is_master :1;
/* true when the client has asked us to expose stereo 3D mode flags */
unsigned stereo_allowed :1;
/*
@@ -311,10 +314,10 @@ struct drm_file {
/* true if client understands atomic properties */
unsigned atomic:1;
/*
* This client is allowed to gain master privileges for @master.
* This client is the creator of @master.
* Protected by struct drm_device::master_mutex.
*/
unsigned allowed_master:1;
unsigned is_master:1;
struct pid *pid;
kuid_t uid;
@@ -332,7 +335,7 @@ struct drm_file {
void *driver_priv;
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
N.B. not always dev->master */
/**
* fbs - List of framebuffers associated with this file.
*
@@ -371,32 +374,6 @@ struct drm_lock_data {
int idle_has_lock;
};
/**
* struct drm_master - drm master structure
*
* @refcount: Refcount for this master object.
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
* @magic_map: Map of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*/
struct drm_master {
struct kref refcount;
struct drm_minor *minor;
char *unique;
int unique_len;
struct idr magic_map;
struct drm_lock_data lock;
void *driver_priv;
};
/* Size of ringbuffer for vblank timestamps. Just double-buffer
* in initial implementation.
*/
#define DRM_VBLANKTIME_RBSIZE 2
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -420,8 +397,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
int (*suspend) (struct drm_device *, pm_message_t state);
int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
@@ -434,7 +409,7 @@ struct drm_driver {
*
* Driver callback for fetching a raw hardware vblank counter for @crtc.
* If a device doesn't have a hardware counter, the driver can simply
* return the value of drm_vblank_count. The DRM core will account for
* use drm_vblank_no_hw_counter() function. The DRM core will account for
* missed vblank events while interrupts where disabled based on system
* timestamps.
*
@@ -452,8 +427,8 @@ struct drm_driver {
* @pipe: which irq to enable
*
* Enable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
* a hardware vblank counter, the driver should use the
* drm_vblank_no_hw_counter() function that keeps a virtual counter.
*
* RETURNS
* Zero on success, appropriate errno if the given @crtc's vblank
@@ -467,8 +442,8 @@ struct drm_driver {
* @pipe: which irq to enable
*
* Disable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
* a hardware vblank counter, the driver should use the
* drm_vblank_no_hw_counter() function that keeps a virtual counter.
*/
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
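Putting the three hooks together for hardware without a usable counter (driver name and callbacks hypothetical):

static struct drm_driver foo_drm_driver = {
	/* DRM core maintains a virtual counter */
	.get_vblank_counter	= drm_vblank_no_hw_counter,
	.enable_vblank		= foo_enable_vblank,	/* hypothetical */
	.disable_vblank		= foo_disable_vblank,	/* hypothetical */
	/* ... */
};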
@@ -573,19 +548,27 @@ struct drm_driver {
int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
bool from_open);
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
bool from_release);
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
int (*debugfs_init)(struct drm_minor *minor);
void (*debugfs_cleanup)(struct drm_minor *minor);
/**
* Driver-specific constructor for drm_gem_objects, to set up
* obj->driver_private.
* @gem_free_object: destructor for drm_gem_objects
*
* Returns 0 on success.
* This is deprecated and should not be used by new drivers. Use
* @gem_free_object_unlocked instead.
*/
void (*gem_free_object) (struct drm_gem_object *obj);
/**
* @gem_free_object_unlocked: destructor for drm_gem_objects
*
* This is for drivers which are not encumbered with dev->struct_mutex
* legacy locking schemes. Use this hook instead of @gem_free_object.
*/
void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -699,38 +682,6 @@ struct drm_minor {
struct list_head debugfs_list;
struct mutex debugfs_lock; /* Protects debugfs_list. */
/* currently active master for this node. Protected by master_mutex */
struct drm_master *master;
};
struct drm_pending_vblank_event {
struct drm_pending_event base;
unsigned int pipe;
struct drm_event_vblank event;
};
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timer_list disable_timer; /* delayed disable timer */
/* vblank counter, protected by dev->vblank_time_lock for writes */
u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
unsigned int pipe; /* crtc index */
int framedur_ns; /* frame/field duration in ns */
int linedur_ns; /* line duration in ns */
bool enabled; /* so we don't call enable more than
once per disable */
};
/**
@@ -750,6 +701,10 @@ struct drm_device {
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
/* currently active master for this device. Protected by master_mutex */
struct drm_master *master;
atomic_t unplugged; /**< Flag whether dev is dead */
struct inode *anon_inode; /**< inode for private address-space */
char *unique; /**< unique name of the device */
@@ -769,6 +724,7 @@ struct drm_device {
atomic_t buf_alloc; /**< Buffer allocation in progress */
/*@} */
struct mutex filelist_mutex;
struct list_head filelist;
/** \name Memory management */
@@ -804,14 +760,6 @@ struct drm_device {
bool irq_enabled;
int irq;
/*
* At load time, disabling the vblank interrupt won't be allowed since
* old clients may not call the modeset ioctl and therefore misbehave.
* Once the modeset ioctl *has* been called though, we can safely
* disable them when unused.
*/
bool vblank_disable_allowed;
/*
* If true, vblank interrupt will be disabled immediately when the
* refcount drops to zero, as opposed to via the vblank disable
@@ -870,6 +818,8 @@ struct drm_device {
int switch_power_state;
};
#include <drm/drm_irq.h>
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
@@ -926,7 +876,6 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
@@ -957,75 +906,14 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
/* IRQ support (drm_irq.h) */
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime);
extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
struct drm_pending_vblank_event *e);
extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_display_mode *mode);
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
* This function returns a pointer to the vblank waitqueue for the CRTC.
* Drivers can use this to implement vblank waits using wait_event() & co.
*/
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
{
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
}
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
/* Stub support (drm_stub.h) */
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
/* drm_drv.c */
void drm_put_dev(struct drm_device *dev);
void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
extern bool drm_atomic;
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
@@ -1076,11 +964,13 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev);
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent);
int drm_dev_init(struct drm_device *dev,
struct drm_driver *driver,
struct device *parent);
void drm_dev_ref(struct drm_device *dev);
void drm_dev_unref(struct drm_device *dev);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
int drm_dev_set_unique(struct drm_device *dev, const char *name);
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
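drm_dev_init() exists so drivers can embed struct drm_device instead of going through drm_dev_alloc(). A hedged sketch of that subclassing pattern, again with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>

struct foo_device {
	struct drm_device drm;	/* embedded, not allocated separately */
	void __iomem *mmio;
};

static struct drm_driver foo_driver;	/* hooks elided for brevity */

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;
	int ret;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* initialize the embedded drm_device in place */
	ret = drm_dev_init(&foo->drm, &foo_driver, &pdev->dev);
	if (ret) {
		kfree(foo);
		return ret;
	}

	return drm_dev_register(&foo->drm, 0);
}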
@@ -1133,7 +1023,6 @@ extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)

View File

@@ -37,7 +37,7 @@ struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
uint32_t type);
struct drm_agp_head *drm_agp_init(struct drm_device *dev);
void drm_agp_clear(struct drm_device *dev);
void drm_legacy_agp_clear(struct drm_device *dev);
int drm_agp_acquire(struct drm_device *dev);
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -93,7 +93,7 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return NULL;
}
static inline void drm_agp_clear(struct drm_device *dev)
static inline void drm_legacy_agp_clear(struct drm_device *dev)
{
}

View File

@@ -30,6 +30,12 @@
#include <drm/drm_crtc.h>
void drm_crtc_commit_put(struct drm_crtc_commit *commit);
static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
{
kref_get(&commit->ref);
}
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -71,7 +77,7 @@ static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtc_states[drm_crtc_index(crtc)];
return state->crtcs[drm_crtc_index(crtc)].state;
}
/**
@@ -86,7 +92,7 @@ static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->plane_states[drm_plane_index(plane)];
return state->planes[drm_plane_index(plane)].state;
}
/**
@@ -106,7 +112,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
if (index >= state->num_connector)
return NULL;
return state->connector_states[index];
return state->connectors[index].state;
}
/**
* __drm_atomic_get_current_plane_state - get current plane state
* @state: global atomic state object
* @plane: plane to grab
*
* This function returns the plane state for the given plane, either from
* @state, or if the plane isn't part of the atomic state update, from @plane.
* This is useful in atomic check callbacks, when drivers need to peek at, but
* not change, state of other planes, since it avoids threading an error code
* back up the call chain.
*
* WARNING:
*
* Note that this function is in general unsafe since it doesn't check for the
* required locking to access state structures. Drivers must ensure that it is
* safe to access the returned state structure through other means. One common
* example is when planes are fixed to a single CRTC, and the driver knows that
* the CRTC lock is held already. In that case holding the CRTC lock gives a
* read-lock on all planes connected to that CRTC. But if planes can be
* reassigned things get more tricky. In that case it's better to use
* drm_atomic_get_plane_state and wire up full error handling.
*
* Returns:
*
* Read-only pointer to the current plane state.
*/
static inline const struct drm_plane_state *
__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
if (state->planes[drm_plane_index(plane)].state)
return state->planes[drm_plane_index(plane)].state;
return plane->state;
}
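As a hedged example of the safe case described in the WARNING, here is a hypothetical CRTC atomic_check that only peeks at the cursor plane, which in this imagined driver is fixed to the CRTC:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *crtc_state)
{
	/* Holding this CRTC's lock read-locks its fixed planes. */
	const struct drm_plane_state *cursor =
		__drm_atomic_get_current_plane_state(crtc_state->state,
						     crtc->cursor);

	if (cursor && cursor->fb) {
		/* e.g. reject cursor/primary combinations the hw can't do */
	}

	return 0;
}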
int __must_check
@@ -137,31 +179,41 @@ drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
#define for_each_connector_in_state(state, connector, connector_state, __i) \
#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (state)->num_connector && \
((connector) = (state)->connectors[__i], \
(connector_state) = (state)->connector_states[__i], 1); \
(__i) < (__state)->num_connector && \
((connector) = (__state)->connectors[__i].ptr, \
(connector_state) = (__state)->connectors[__i].state, 1); \
(__i)++) \
for_each_if (connector)
#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for ((__i) = 0; \
(__i) < (state)->dev->mode_config.num_crtc && \
((crtc) = (state)->crtcs[__i], \
(crtc_state) = (state)->crtc_states[__i], 1); \
(__i) < (__state)->dev->mode_config.num_crtc && \
((crtc) = (__state)->crtcs[__i].ptr, \
(crtc_state) = (__state)->crtcs[__i].state, 1); \
(__i)++) \
for_each_if (crtc_state)
#define for_each_plane_in_state(state, plane, plane_state, __i) \
#define for_each_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (state)->dev->mode_config.num_total_plane && \
((plane) = (state)->planes[__i], \
(plane_state) = (state)->plane_states[__i], 1); \
(__i) < (__state)->dev->mode_config.num_total_plane && \
((plane) = (__state)->planes[__i].ptr, \
(plane_state) = (__state)->planes[__i].state, 1); \
(__i)++) \
for_each_if (plane_state)
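To show the renamed macro arguments in use, a small sketch (hypothetical, not from this diff) that walks the CRTCs of an atomic update using the helper documented just below:

static void foo_log_modesets(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			DRM_DEBUG_KMS("[CRTC:%d] needs full modeset\n",
				      crtc->base.id);
	}
}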
/**
* drm_atomic_crtc_needs_modeset - compute combined modeset need
* @state: &drm_crtc_state for the CRTC
*
* To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
* connectors_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
*/
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{

View File

@@ -38,10 +38,13 @@ int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async);
bool nonblock);
void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state);
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
struct drm_atomic_state *old_state,
struct drm_crtc *crtc);
@@ -69,8 +72,15 @@ void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_sta
void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
bool atomic);
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall);
/* nonblocking commit helpers */
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
bool nonblock);
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
/* implementations for legacy interfaces */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
@@ -108,6 +118,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
uint32_t flags);
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode);
struct drm_encoder *
drm_atomic_helper_best_encoder(struct drm_connector *connector);
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -115,8 +127,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -125,8 +136,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -142,13 +152,12 @@ struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t start, uint32_t size);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -158,7 +167,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* This iterates over the current state, useful (for example) when applying
* atomic state after it has been checked and swapped. To iterate over the
* planes which *will* be attached (for ->atomic_check()) see
* drm_crtc_for_each_pending_plane()
* drm_atomic_crtc_state_for_each_plane().
*/
#define drm_atomic_crtc_for_each_plane(plane, crtc) \
drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
@@ -170,11 +179,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during (for example)
* ->atomic_check() operations, to validate the incoming state
* ->atomic_check() operations, to validate the incoming state.
*/
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
/**
* drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
* @plane: the loop cursor
* @plane_state: loop cursor for the plane's state, must be const
* @crtc_state: the incoming crtc-state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during (for example)
* ->atomic_check() operations, to validate the incoming state.
*
* Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
* const plane_state. This is useful when a driver just wants to peek at other
* active planes on this crtc, but does not need to change it.
*/
#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
for_each_if ((plane_state = \
__drm_atomic_get_current_plane_state((crtc_state)->state, \
plane)))
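A hedged usage sketch for the new iterator, with hypothetical foo_* naming; the plane_state loop cursor is const, per the contract above:

static int foo_check_crtc_planes(struct drm_crtc_state *crtc_state)
{
	struct drm_plane *plane;
	const struct drm_plane_state *plane_state;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
						   crtc_state) {
		if (plane_state->fb) {
			/* inspect crtc_x/crtc_y/crtc_w/crtc_h, but do not
			 * modify: the state is read-only here */
		}
	}

	return 0;
}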
/*
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @plane: plane object

include/drm/drm_auth.h (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
/*
* Internal Header for the Direct Rendering Manager
*
* Copyright 2016 Intel Corporation
*
* Author: Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_AUTH_H_
#define _DRM_AUTH_H_
/**
* struct drm_master - drm master structure
*
* @refcount: Refcount for this master object.
* @dev: Link back to the DRM device
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
* @magic_map: Map of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*
* Note that master structures are only relevant for the legacy/primary device
* nodes, hence there can only be one per device, not one per drm_minor.
*/
struct drm_master {
struct kref refcount;
struct drm_device *dev;
char *unique;
int unique_len;
struct idr magic_map;
struct drm_lock_data lock;
void *driver_priv;
};
struct drm_master *drm_master_get(struct drm_master *master);
void drm_master_put(struct drm_master **master);
bool drm_is_current_master(struct drm_file *fpriv);
#endif
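A short sketch of the intended use of drm_is_current_master(), with a hypothetical driver ioctl:

static int foo_privileged_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	/* only the current master may reconfigure the display */
	if (!drm_is_current_master(file_priv))
		return -EACCES;

	/* ... privileged work ... */
	return 0;
}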

File diff suppressed because it is too large

View File

@@ -48,9 +48,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int degamma_lut_size,
int gamma_lut_size);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);

View File

@@ -73,4 +73,21 @@ struct displayid_tiled_block {
u8 topology_id[8];
} __packed;
struct displayid_detailed_timings_1 {
u8 pixel_clock[3];
u8 flags;
u8 hactive[2];
u8 hblank[2];
u8 hsync[2];
u8 hsw[2];
u8 vactive[2];
u8 vblank[2];
u8 vsync[2];
u8 vsw[2];
} __packed;
struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[0];
};
#endif

View File

@@ -0,0 +1,92 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef DRM_DP_DUAL_MODE_HELPER_H
#define DRM_DP_DUAL_MODE_HELPER_H
#include <linux/types.h>
/*
* Optional for type 1 DVI adaptors
* Mandatory for type 1 HDMI and type 2 adaptors
*/
#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */
#define DP_DUAL_MODE_HDMI_ID_LEN 16
/*
* Optional for type 1 adaptors
* Mandatory for type 2 adaptors
*/
#define DP_DUAL_MODE_ADAPTOR_ID 0x10
#define DP_DUAL_MODE_REV_MASK 0x07
#define DP_DUAL_MODE_REV_TYPE2 0x00
#define DP_DUAL_MODE_TYPE_MASK 0xf0
#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13 */
#define DP_DUAL_IEEE_OUI_LEN 3
#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
#define DP_DUAL_DEVICE_ID_LEN 6
#define DP_DUAL_MODE_HARDWARE_REV 0x1a
#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b
#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c
#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d
#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e
#define DP_DUAL_MODE_TMDS_OEN 0x20
#define DP_DUAL_MODE_TMDS_DISABLE 0x01
#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21
#define DP_DUAL_MODE_CEC_ENABLE 0x01
#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size);
ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size);
/**
* enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
* @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
* @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
*/
enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_NONE,
DRM_DP_DUAL_MODE_UNKNOWN,
DRM_DP_DUAL_MODE_TYPE1_DVI,
DRM_DP_DUAL_MODE_TYPE1_HDMI,
DRM_DP_DUAL_MODE_TYPE2_DVI,
DRM_DP_DUAL_MODE_TYPE2_HDMI,
};
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter);
int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool *enabled);
int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
#endif
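Putting the helpers together, a hedged sketch that detects an adaptor on the DDC bus and enables its TMDS output (foo_* is hypothetical, and drm_dp_dual_mode_max_tmds_clock() is assumed here to report kHz):

static int foo_setup_dongle(struct i2c_adapter *ddc)
{
	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(ddc);

	if (type == DRM_DP_DUAL_MODE_NONE)
		return 0;	/* nothing between DP source and sink */

	DRM_DEBUG_KMS("adaptor: %s, max TMDS clock: %d kHz\n",
		      drm_dp_get_dual_mode_type_name(type),
		      drm_dp_dual_mode_max_tmds_clock(type, ddc));

	/* type 2 adaptors let us gate the TMDS output explicitly */
	return drm_dp_dual_mode_set_tmds_output(type, ddc, true);
}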

View File

@@ -73,6 +73,7 @@
# define DP_ENHANCED_FRAME_CAP (1 << 7)
#define DP_MAX_DOWNSPREAD 0x003
# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
#define DP_NORP 0x004
@@ -621,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -655,6 +657,8 @@ struct edp_vsc_psr {
#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
static inline int
drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
@@ -745,7 +749,14 @@ struct drm_dp_aux {
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
unsigned i2c_nack_count, i2c_defer_count;
/**
* @i2c_nack_count: Counts I2C NACKs, used for DP validation.
*/
unsigned i2c_nack_count;
/**
* @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
*/
unsigned i2c_defer_count;
};
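For context, a minimal sketch of reading the receiver capability block over an AUX channel, using the drm_dp_dpcd_read() declared just below (the foo_ wrapper is hypothetical):

static int foo_read_receiver_caps(struct drm_dp_aux *aux,
				  u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret < 0)
		return ret;

	/* a short read means the AUX transaction went sideways */
	return ret == DP_RECEIVER_CAP_SIZE ? 0 : -EIO;
}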
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -803,6 +814,7 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);

View File

@@ -87,7 +87,15 @@ struct drm_dp_mst_port {
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
struct edid *cached_edid; /* for DP logical ports - make tiling work */
/**
* @cached_edid: for DP logical ports - make tiling work by ensuring
* that the EDID for all connectors is read immediately.
*/
struct edid *cached_edid;
/**
* @has_audio: Tracks whether the sink connector to this port is
* audio-capable.
*/
bool has_audio;
};
@@ -397,71 +405,154 @@ struct drm_dp_payload {
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
* @dev: device pointer for adding i2c devices etc.
* @cbs: callbacks for connector addition and destruction.
* @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
* @aux: aux channel for the DP connector.
* @max_payloads: maximum number of payloads the GPU can generate.
* @conn_base_id: DRM connector ID this mgr is connected to.
* @down_rep_recv: msg receiver state for down replies.
* @up_req_recv: msg receiver state for up requests.
* @lock: protects mst state, primary, dpcd.
* @mst_state: if this manager is enabled for an MST capable port.
* @mst_primary: pointer to the primary branch device.
* @dpcd: cache of DPCD for primary port.
* @pbn_div: PBN to slots divisor.
*
* This struct represents the toplevel displayport MST topology manager.
* There should be one instance of this for every MST capable DP connector
* on the GPU.
*/
struct drm_dp_mst_topology_mgr {
/**
* @dev: device pointer for adding i2c devices etc.
*/
struct device *dev;
/**
* @cbs: callbacks for connector addition and destruction.
*/
const struct drm_dp_mst_topology_cbs *cbs;
/**
* @max_dpcd_transaction_bytes: maximum number of bytes to read/write
* in one go.
*/
int max_dpcd_transaction_bytes;
struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
/**
* @aux: AUX channel for the DP MST connector this topology mgr is
* controlling.
*/
struct drm_dp_aux *aux;
/**
* @max_payloads: maximum number of payloads the GPU can generate.
*/
int max_payloads;
/**
* @conn_base_id: DRM connector ID this mgr is connected to. Only used
* to build the MST connector path value.
*/
int conn_base_id;
/* only ever accessed from the workqueue - which should be serialised */
/**
* @down_rep_recv: Message receiver state for down replies. This and
* @up_req_recv are only ever accessed from the work item, which is
* serialised.
*/
struct drm_dp_sideband_msg_rx down_rep_recv;
/**
* @up_req_recv: Message receiver state for up requests. This and
* @down_rep_recv are only ever accessed from the work item, which is
* serialised.
*/
struct drm_dp_sideband_msg_rx up_req_recv;
/* pointer to info about the initial MST device */
struct mutex lock; /* protects mst_state + primary + dpcd */
/**
* @lock: protects mst state, primary, dpcd.
*/
struct mutex lock;
/**
* @mst_state: If this manager is enabled for an MST capable port. False
* if no MST sink/branch device is connected.
*/
bool mst_state;
/**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
/**
* @dpcd: Cache of DPCD for primary port.
*/
u8 dpcd[DP_RECEIVER_CAP_SIZE];
/**
* @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
*/
u8 sink_count;
/**
* @pbn_div: PBN to slots divisor.
*/
int pbn_div;
/**
* @total_slots: Total slots that can be allocated.
*/
int total_slots;
/**
* @avail_slots: Still available slots that can be allocated.
*/
int avail_slots;
/**
* @total_pbn: Total PBN count.
*/
int total_pbn;
/* messages to be transmitted */
/* qlock protects the upq/downq and in_progress,
the mstb tx_slots and txmsg->state once they are queued */
/**
* @qlock: protects @tx_msg_downq, the tx_slots in struct
* &drm_dp_mst_branch and txmsg->state once they are queued
*/
struct mutex qlock;
/**
* @tx_msg_downq: List of pending down replies.
*/
struct list_head tx_msg_downq;
bool tx_down_in_progress;
/* payload info + lock for it */
/**
* @payload_lock: Protect payload information.
*/
struct mutex payload_lock;
/**
* @proposed_vcpis: Array of pointers for the new VCPI allocation. The
* VCPI structure itself is embedded into the corresponding
* &drm_dp_mst_port structure.
*/
struct drm_dp_vcpi **proposed_vcpis;
/**
* @payloads: Array of payloads.
*/
struct drm_dp_payload *payloads;
/**
* @payload_mask: Elements of @payloads actually in use. Since
* reallocation of active outputs isn't possible, gaps can be created by
* disabling outputs out of order compared to how they've been enabled.
*/
unsigned long payload_mask;
/**
* @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
*/
unsigned long vcpi_mask;
/**
* @tx_waitq: Wait to queue stall for the tx worker.
*/
wait_queue_head_t tx_waitq;
/**
* @work: Probe work.
*/
struct work_struct work;
/**
* @tx_work: Sideband transmit worker. This can nest within the main
* @work worker for each transaction @work launches.
*/
struct work_struct tx_work;
/**
* @destroy_connector_list: List of to be destroyed connectors.
*/
struct list_head destroy_connector_list;
/**
* @destroy_connector_lock: Protects @destroy_connector_list.
*/
struct mutex destroy_connector_lock;
/**
* @destroy_connector_work: Work item to destroy connectors. Needed to
* avoid locking inversion.
*/
struct work_struct destroy_connector_work;
};

View File

@@ -328,7 +328,15 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
#else
static inline int drm_load_edid_firmware(struct drm_connector *connector)
{
return 0;
}
#endif
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,

View File

@@ -4,11 +4,18 @@
struct drm_fbdev_cma;
struct drm_gem_cma_object;
struct drm_fb_helper_surface_size;
struct drm_framebuffer_funcs;
struct drm_fb_helper_funcs;
struct drm_framebuffer;
struct drm_fb_helper;
struct drm_device;
struct drm_file;
struct drm_mode_fb_cmd2;
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs);
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count);
@@ -16,7 +23,18 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
const struct drm_framebuffer_funcs *funcs);
void drm_fb_cma_destroy(struct drm_framebuffer *fb);
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int *handle);
struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
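A hedged sketch of how these CMA helpers typically plug into a driver (hypothetical foo_* glue; 32 is just an example bpp):

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,	/* userspace-facing FB creation */
};

static int foo_fbdev_setup(struct drm_device *drm)
{
	struct drm_fbdev_cma *fbdev;

	fbdev = drm_fbdev_cma_init(drm, 32,
				   drm->mode_config.num_crtc,
				   drm->mode_config.num_connector);
	return PTR_ERR_OR_ZERO(fbdev);
}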
@@ -24,6 +42,8 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
#ifdef CONFIG_DEBUG_FS
struct seq_file;
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
#endif

View File

@@ -172,6 +172,10 @@ struct drm_fb_helper_connector {
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
* @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
* the screen buffer
* @dirty_lock: spinlock protecting @dirty_clip
* @dirty_work: worker used to flush the framebuffer
*
* This is the main structure used by the fbdev helpers. Drivers supporting
* fbdev emulation should embed this into their overall driver structure.
@@ -189,6 +193,9 @@ struct drm_fb_helper {
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
u32 pseudo_palette[17];
struct drm_clip_rect dirty_clip;
spinlock_t dirty_lock;
struct work_struct dirty_work;
/**
* @kernel_fb_list:
@@ -205,17 +212,6 @@ struct drm_fb_helper {
* needs to be reprobe when fbdev is in control again.
*/
bool delayed_hotplug;
/**
* @atomic:
*
* Use atomic updates for restore_fbdev_mode(), etc. This defaults to
* true if driver has DRIVER_ATOMIC feature flag, but drivers can
* override it to true after drm_fb_helper_init() if they support atomic
* modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
* does not require ASYNC commits).
*/
bool atomic;
};
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -245,6 +241,9 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
@@ -368,6 +367,11 @@ static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
{
}
static inline void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
}
static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
char __user *buf, size_t count,
loff_t *ppos)

include/drm/drm_fourcc.h (new file, 37 lines)
View File

@@ -0,0 +1,37 @@
/*
* Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#ifndef __DRM_FOURCC_H__
#define __DRM_FOURCC_H__
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
const char *drm_get_format_name(uint32_t format);
#endif /* __DRM_FOURCC_H__ */

View File

@@ -200,47 +200,29 @@ drm_gem_object_reference(struct drm_gem_object *obj)
}
/**
* drm_gem_object_unreference - release a GEM BO reference
* __drm_gem_object_unreference - raw function to release a GEM BO reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must hold the dev->struct_mutex
* lock when calling this function, even when the driver doesn't use
* dev->struct_mutex for anything.
* This function is meant to be used by drivers which are not encumbered with
* dev->struct_mutex legacy locking and which are using the
* gem_free_object_unlocked callback. It avoids all the locking checks and
* locking overhead of drm_gem_object_unreference() and
* drm_gem_object_unreference_unlocked().
*
* For drivers not encumbered with legacy locking use
* drm_gem_object_unreference_unlocked() instead.
* Drivers should never call this directly in their code. Instead they should
* wrap it up into a driver_gem_object_unreference(struct driver_gem_object
* *obj) wrapper function, and use that. Shared code should never call this, to
* avoid breaking drivers by accident which still depend upon dev->struct_mutex
* locking.
*/
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
__drm_gem_object_unreference(struct drm_gem_object *obj)
{
if (obj != NULL) {
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
kref_put(&obj->refcount, drm_gem_object_free);
}
kref_put(&obj->refcount, drm_gem_object_free);
}
/**
* drm_gem_object_unreference_unlocked - release a GEM BO reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must not hold the
* dev->struct_mutex lock when calling this function.
*/
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev;
if (!obj)
return;
dev = obj->dev;
if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
else
might_lock(&dev->struct_mutex);
}
void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
void drm_gem_object_unreference(struct drm_gem_object *obj);
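The wrapper pattern the comment above prescribes might look like this (struct foo_gem_object is hypothetical):

struct foo_gem_object {
	struct drm_gem_object base;
	/* driver-private members */
};

/* drivers wrap the raw helper so struct_mutex users can't misuse it */
static inline void foo_gem_object_unreference(struct foo_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}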
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
@@ -256,9 +238,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);

include/drm/drm_irq.h (new file, 183 lines)
View File

@@ -0,0 +1,183 @@
/*
* Copyright 2016 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_IRQ_H_
#define _DRM_IRQ_H_
#include <linux/seqlock.h>
/**
* struct drm_pending_vblank_event - pending vblank event tracking
*/
struct drm_pending_vblank_event {
/**
* @base: Base structure for tracking pending DRM events.
*/
struct drm_pending_event base;
/**
* @pipe: drm_crtc_index() of the &drm_crtc this event is for.
*/
unsigned int pipe;
/**
* @event: Actual event which will be sent to userspace.
*/
struct drm_event_vblank event;
};
/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
*
* Note that for historical reasons - the vblank handling code is still shared
* with legacy/non-kms drivers - this is a free-standing structure not directly
* connected to struct &drm_crtc. But all public interface functions take
* a struct &drm_crtc to hide this implementation detail.
*/
struct drm_vblank_crtc {
/**
* @dev: Pointer to the &drm_device.
*/
struct drm_device *dev;
/**
* @queue: Wait queue for vblank waiters.
*/
wait_queue_head_t queue; /**< VBLANK wait queue */
/**
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
* drm_vblank_offdelay module option and the setting of the
* max_vblank_count value in the &drm_device structure.
*/
struct timer_list disable_timer;
/**
* @seqlock: Protect vblank count and time.
*/
seqlock_t seqlock; /* protects vblank count and time */
/**
* @count: Current software vblank counter.
*/
u32 count;
/**
* @time: Vblank timestamp corresponding to @count.
*/
struct timeval time;
/**
* @refcount: Number of users/waiters of the vblank interrupt. Only when
* this refcount reaches 0 can the hardware interrupt be disabled using
* @disable_timer.
*/
atomic_t refcount; /* number of users of vblank interrupts per crtc */
/**
* @last: Protected by dev->vbl_lock, used for wraparound handling.
*/
u32 last;
/**
* @inmodeset: Tracks whether the vblank is disabled due to a modeset.
* For legacy drivers bit 2 additionally tracks whether an additional
* temporary vblank reference has been acquired to paper over the
* hardware counter resetting/jumping. KMS drivers should instead just
* call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
* save and restore the vblank count.
*/
unsigned int inmodeset; /* Display driver is setting mode */
/**
* @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
* structure.
*/
unsigned int pipe;
/**
* @framedur_ns: Frame/Field duration in ns, used by
* drm_calc_vbltimestamp_from_scanoutpos() and computed by
* drm_calc_timestamping_constants().
*/
int framedur_ns;
/**
* @linedur_ns: Line duration in ns, used by
* drm_calc_vbltimestamp_from_scanoutpos() and computed by
* drm_calc_timestamping_constants().
*/
int linedur_ns;
/**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
* disabling functions multiple times.
*/
bool enabled;
};
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_display_mode *mode);
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
* This function returns a pointer to the vblank waitqueue for the CRTC.
* Drivers can use this to implement vblank waits using wait_event() and related
* functions.
*/
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
{
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
}
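A hedged sketch of the wait_event() usage the comment describes; real code also needs drm_crtc_vblank_get()/put() around the wait, and must mind counter wraparound:

static int foo_wait_for_vblank(struct drm_crtc *crtc, u32 target)
{
	wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
	long timeout;

	/* caller is assumed to hold a vblank reference already */
	timeout = wait_event_timeout(*queue,
				     drm_crtc_vblank_count(crtc) >= target,
				     msecs_to_jiffies(100));

	return timeout ? 0 : -ETIMEDOUT;
}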
#endif

View File

@@ -1,6 +1,8 @@
#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
#include <drm/drm_auth.h>
/*
* Legacy driver interfaces for the Direct Rendering Manager
*
@@ -154,8 +156,10 @@ struct drm_map_list {
int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_p);
int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
void drm_legacy_master_rmmaps(struct drm_device *dev,
struct drm_master *master);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);

View File

@@ -54,6 +54,25 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
{
if (size != 0 && nmemb > SIZE_MAX / size)
return NULL;
if (size * nmemb <= PAGE_SIZE)
return kmalloc(nmemb * size, gfp);
if (gfp & __GFP_RECLAIMABLE) {
void *ptr = kmalloc(nmemb * size,
gfp | __GFP_NOWARN | __GFP_NORETRY);
if (ptr)
return ptr;
}
return __vmalloc(size * nmemb,
gfp | __GFP_HIGHMEM, PAGE_KERNEL);
}
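A usage sketch: passing a reclaimable GFP mask takes the kmalloc-first path above before falling back to vmalloc. The foo_ function is hypothetical:

static struct page **foo_alloc_page_array(unsigned long npages)
{
	/* GFP_TEMPORARY includes __GFP_RECLAIMABLE, so kmalloc() is
	 * tried first without retries, then __vmalloc() as fallback */
	return drm_malloc_gfp(npages, sizeof(struct page *),
			      GFP_TEMPORARY | __GFP_ZERO);
}

/* pair with drm_free_large(), which handles both allocation paths */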
static __inline void drm_free_large(void *ptr)
{
kvfree(ptr);

View File

@@ -180,6 +180,8 @@ struct mipi_dsi_device {
unsigned long mode_flags;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
{
return container_of(dev, struct mipi_dsi_device, dev);
@@ -263,6 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);

View File

@@ -169,6 +169,8 @@ enum drm_mode_status {
*
* The horizontal and vertical timings are defined per the following diagram.
*
* ::
*
*
* Active Front Sync Back
* Region Porch Porch

View File

@@ -672,7 +672,7 @@ struct drm_connector_helper_funcs {
* fixed panel can also manually add specific modes using
* drm_mode_probed_add(). Drivers which manually add modes should also
* make sure that the @display_info, @width_mm and @height_mm fields of the
* struct #drm_connector are filled in.
* struct &drm_connector are filled in.
*
* Virtual drivers that just want some standard VESA mode with a given
* resolution can call drm_add_modes_noedid(), and mark the preferred
@@ -736,6 +736,11 @@ struct drm_connector_helper_funcs {
* inspect dynamic configuration state should instead use
* @atomic_best_encoder.
*
* You can leave this function as NULL if the connector is only
* attached to a single encoder and you are using the atomic helpers.
* In this case, the core will call drm_atomic_helper_best_encoder()
* for you.
*
* RETURNS:
*
* Encoder that should be used for the given connector and connector
@@ -752,8 +757,9 @@ struct drm_connector_helper_funcs {
* need to select the best encoder depending upon the desired
* configuration and can't select it statically.
*
* This function is used by drm_atomic_helper_check_modeset() and either
* this or @best_encoder is required.
* This function is used by drm_atomic_helper_check_modeset().
* If it is not implemented, the core will fall back to @best_encoder
* (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
*
* NOTE:
*
@@ -925,4 +931,43 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
plane->helper_private = funcs;
}
/**
* struct drm_mode_config_helper_funcs - global modeset helper operations
*
* These helper functions are used by the atomic helpers.
*/
struct drm_mode_config_helper_funcs {
/**
* @atomic_commit_tail:
*
* This hook is used by the default atomic_commit() hook implemented in
* drm_atomic_helper_commit() together with the nonblocking commit
* helpers (see drm_atomic_helper_setup_commit() for a starting point)
* to implement blocking and nonblocking commits easily. It is not used
* by the atomic helpers themselves.
*
* This hook should first commit the given atomic state to the hardware.
* But drivers can add more waiting calls at the start of their
* implementation, e.g. to wait for driver-internal requests for implicit
* syncing, before starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
* to call drm_atomic_helper_commit_hw_done(). Then wait for the update
* to be executed by the hardware, for example using
* drm_atomic_helper_wait_for_vblanks(), and then clean up the old
* framebuffers using drm_atomic_helper_cleanup_planes().
*
* When disabling a CRTC this hook _must_ stall for the commit to
* complete. Vblank waits don't work on a disabled CRTC, hence the core
* can't take care of this. And it also can't rely on the vblank event,
* since that can be signalled already when the screen shows black,
* which can happen much earlier than the last hardware access needed to
* shut off the display pipeline completely.
*
* This hook is optional, the default implementation is
* drm_atomic_helper_commit_tail().
*/
void (*atomic_commit_tail)(struct drm_atomic_state *state);
};
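A hedged sketch of a driver-provided commit_tail following the contract above; it mirrors what the default helper is expected to do, with room for driver-specific waits:

static void foo_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* optional: wait for driver-internal implicit syncing here */

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* hardware state is committed: unblock the next commit */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}

static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
	.atomic_commit_tail = foo_atomic_commit_tail,
};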
#endif

View File

@@ -75,6 +75,14 @@ struct drm_panel_funcs {
struct display_timing *timings);
};
/**
* struct drm_panel - DRM panel object
* @drm: DRM device owning the panel
* @connector: DRM connector that the panel is attached to
* @dev: parent device of the panel
* @funcs: operations that can be performed on the panel
* @list: panel entry in registry
*/
struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
@@ -85,6 +93,17 @@ struct drm_panel {
struct list_head list;
};
/**
* drm_panel_unprepare - power off a panel
* @panel: DRM panel
*
* Calling this function will completely power off a panel (assert the panel's
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
*
* Return: 0 on success or a negative error code on failure.
*/
static inline int drm_panel_unprepare(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->unprepare)
@@ -93,6 +112,16 @@ static inline int drm_panel_unprepare(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
/**
* drm_panel_disable - disable a panel
* @panel: DRM panel
*
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
*
* Return: 0 on success or a negative error code on failure.
*/
static inline int drm_panel_disable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->disable)
@@ -101,6 +130,16 @@ static inline int drm_panel_disable(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
/**
* drm_panel_prepare - power on a panel
* @panel: DRM panel
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
* integrated circuitry via a command bus.
*
* Return: 0 on success or a negative error code on failure.
*/
static inline int drm_panel_prepare(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->prepare)
@@ -109,6 +148,16 @@ static inline int drm_panel_prepare(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
/**
* drm_panel_enable - enable a panel
* @panel: DRM panel
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
* this call completes.
*
* Return: 0 on success or a negative error code on failure.
*/
static inline int drm_panel_enable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->enable)
@@ -117,6 +166,16 @@ static inline int drm_panel_enable(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
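The four wrappers are meant to be called in a fixed order. A hedged sketch of the power-up half (power-down reverses it: drm_panel_disable(), then drm_panel_unprepare()):

static int foo_output_enable(struct drm_panel *panel)
{
	int ret;

	ret = drm_panel_prepare(panel);	/* power on, deassert reset */
	if (ret < 0)
		return ret;

	/* ... start scanout so the first visible frame is valid ... */

	return drm_panel_enable(panel);	/* backlight on, image visible */
}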
/**
* drm_panel_get_modes - probe the available display modes of a panel
* @panel: DRM panel
*
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
* Return: The number of modes available from the panel on success or a
* negative error code on failure.
*/
static inline int drm_panel_get_modes(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->get_modes)

View File

@@ -46,6 +46,7 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_rect *src,
struct drm_rect *dest,
const struct drm_rect *clip,
unsigned int rotation,
int min_scale,
int max_scale,
bool can_position,

View File

@@ -0,0 +1,94 @@
/*
* Copyright (C) 2016 Noralf Trønnes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
struct drm_simple_display_pipe;
/**
* struct drm_simple_display_pipe_funcs - helper operations for a simple
* display pipeline
*/
struct drm_simple_display_pipe_funcs {
/**
* @enable:
*
* This function should be used to enable the pipeline.
* It is called when the underlying crtc is enabled.
* This hook is optional.
*/
void (*enable)(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state);
/**
* @disable:
*
* This function should be used to disable the pipeline.
* It is called when the underlying crtc is disabled.
* This hook is optional.
*/
void (*disable)(struct drm_simple_display_pipe *pipe);
/**
* @check:
*
* This function is called in the check phase of an atomic update,
* specifically when the underlying plane is checked.
* The simple display pipeline helpers already check that the plane is
* not scaled, fills the entire visible area and is always enabled
* when the crtc is also enabled.
* This hook is optional.
*
* RETURNS:
*
* 0 on success, -EINVAL if the state or the transition can't be
* supported, -ENOMEM on memory allocation failure and -EDEADLK if an
* attempt to obtain another state object ran into a &drm_modeset_lock
* deadlock.
*/
int (*check)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state,
struct drm_crtc_state *crtc_state);
/**
* @update:
*
* This function is called when the underlying plane state is updated.
* This hook is optional.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
};
/**
* struct drm_simple_display_pipe - simple display pipeline
* @crtc: CRTC control structure
* @plane: Plane control structure
* @encoder: Encoder control structure
* @connector: Connector control structure
* @funcs: Pipeline control functions (optional)
*
* Simple display pipeline with plane, crtc and encoder collapsed into one
* entity. It should be initialized by calling drm_simple_display_pipe_init().
*/
struct drm_simple_display_pipe {
struct drm_crtc crtc;
struct drm_plane plane;
struct drm_encoder encoder;
struct drm_connector *connector;
const struct drm_simple_display_pipe_funcs *funcs;
};
int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
struct drm_connector *connector);
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
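To make the collapsed pipeline concrete, here is a minimal hedged sketch of a driver registering a simple display pipe; the my_* names, the single-format list and the update-only funcs table are illustrative assumptions, while drm_simple_display_pipe_init() and the hook signatures come straight from this header:

#include <drm/drm_fourcc.h>
#include <drm/drm_simple_kms_helper.h>

static void my_pipe_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *plane_state)
{
	/* Flush pipe->plane.state->fb to the hardware here. */
}

static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
	.update = my_pipe_update,	/* all other hooks are optional */
};

static const uint32_t my_formats[] = { DRM_FORMAT_XRGB8888 };

static int my_pipe_register(struct drm_device *drm,
			    struct drm_simple_display_pipe *pipe,
			    struct drm_connector *connector)
{
	return drm_simple_display_pipe_init(drm, pipe, &my_pipe_funcs,
					    my_formats,
					    ARRAY_SIZE(my_formats),
					    connector);
}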

View File

@@ -175,19 +175,6 @@ static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
return node->vm_node.size;
}
/**
* drm_vma_node_has_offset() - Check whether node is added to offset manager
* @node: Node to be checked
*
* RETURNS:
* true iff the node was previously allocated an offset and added to
* a vma offset manager.
*/
static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
{
return drm_mm_node_allocated(&node->vm_node);
}
/**
* drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
* @node: Linked offset node
@@ -220,7 +207,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
struct address_space *file_mapping)
{
if (drm_vma_node_has_offset(node))
if (drm_mm_node_allocated(&node->vm_node))
unmap_mapping_range(file_mapping,
drm_vma_node_offset_addr(node),
drm_vma_node_size(node) << PAGE_SHIFT, 1);

View File

@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
#define INTEL_BSM 0x5c
#define INTEL_BSM_MASK (0xFFFF << 20)
#endif /* _I915_DRM_H_ */
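As a hedged sketch of how these two defines pair up (the pdev variable and surrounding driver context are assumed; pci_read_config_dword() is the standard PCI accessor), the base of stolen memory is read from PCI config space and masked:

u32 bsm;
phys_addr_t stolen_base;

/* INTEL_BSM is a PCI config register; the upper bits hold the base. */
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
stolen_base = (phys_addr_t)(bsm & INTEL_BSM_MASK);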

View File

@@ -309,6 +309,7 @@
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
@@ -322,15 +323,12 @@
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
#define INTEL_KBL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \

View File

@@ -13,6 +13,9 @@ void intel_gmch_remove(void);
bool intel_enable_gtt(void);
void intel_gtt_chipset_flush(void);
void intel_gtt_insert_page(dma_addr_t addr,
unsigned int pg,
unsigned int flags);
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);

View File

@@ -173,7 +173,7 @@ struct ttm_tt;
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
* @priv_flags: Flags describing buffer object internal state.
* @moving: Fence set when BO is moving
* @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
@@ -239,7 +239,7 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
unsigned long priv_flags;
struct fence *moving;
struct drm_vma_offset_node vma_node;
@@ -314,8 +314,22 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
extern int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait);
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
* @placement: The struct ttm_placement to check @mem against.
* @mem: The struct ttm_mem_reg indicating the region where the bo resides
* @new_flags: Describes compatible placement found
*
* Returns true if the placement is compatible
*/
extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags);
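A hedged usage sketch (bo, placement and the surrounding error handling are assumed driver context): the helper lets a driver skip a full validate when the buffer already sits in an acceptable placement:

uint32_t new_flags;
int ret = 0;

/* Only revalidate if the current placement is not already compatible. */
if (!ttm_bo_mem_compat(&placement, &bo->mem, &new_flags))
	ret = ttm_bo_validate(bo, &placement, true, false);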
/**
* ttm_bo_validate
*

View File

@@ -258,8 +258,10 @@ struct ttm_mem_type_manager_func {
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
* @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
* static information. bdev::driver::io_mem_free is never used.
* @move_lock: lock for move fence
* @lru: The lru list for this memory type.
* @move: The fence of the last pipelined move operation.
*
* This structure is used to identify and manage memory types for a device.
* It's set up by the ttm_bo_driver::init_mem_type method.
@@ -286,6 +288,7 @@ struct ttm_mem_type_manager {
struct mutex io_reserve_mutex;
bool use_io_reserve_lru;
bool io_reserve_fastpath;
spinlock_t move_lock;
/*
* Protected by @io_reserve_mutex:
@@ -298,6 +301,11 @@ struct ttm_mem_type_manager {
*/
struct list_head lru;
/*
* Protected by @move_lock.
*/
struct fence *move;
};
/**
@@ -434,6 +442,18 @@ struct ttm_bo_driver {
*/
int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
/**
* Optional driver callback for when BO is removed from the LRU.
* Called with LRU lock held immediately before the removal.
*/
void (*lru_removal)(struct ttm_buffer_object *bo);
/**
* Return the list_head after which a BO should be inserted in the LRU.
*/
struct list_head *(*lru_tail)(struct ttm_buffer_object *bo);
struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo);
};
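For the new LRU hooks, a driver that wants stock behaviour can point them at the default helpers declared further down in this header; a hedged fragment (the my_bo_driver table and its elided members are illustrative):

static struct ttm_bo_driver my_bo_driver = {
	/* ... mandatory hooks elided ... */
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};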
/**
@@ -491,9 +511,6 @@ struct ttm_bo_global {
#define TTM_NUM_MEM_TYPES 8
#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
* struct ttm_bo_device - Buffer object driver device-specific data.
*
@@ -502,7 +519,6 @@ struct ttm_bo_global {
* @vma_manager: Address space manager
* @lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @val_seq: Current validation sequence.
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
@@ -528,7 +544,6 @@ struct ttm_bo_device {
* Protected by the global:lru lock.
*/
struct list_head ddestroy;
uint32_t val_seq;
/*
* Protected by load / firstopen / lastclose /unload sync.
@@ -753,14 +768,16 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo);
struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo);
/**
* __ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_ticket: If @bo is already reserved, Only sleep waiting for
* it to become unreserved if @ticket->stamp is older.
* @ticket: ticket used to acquire the ww_mutex.
*
* Will not remove reserved buffers from the lru lists.
* Otherwise identical to ttm_bo_reserve.
@@ -776,8 +793,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
* be returned if @use_ticket is set to true.
*/
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_ticket,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
int ret = 0;
@@ -806,8 +822,7 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_ticket: If @bo is already reserved, Only sleep waiting for
* it to become unreserved if @ticket->stamp is older.
* @ticket: ticket used to acquire the ww_mutex.
*
* Locks a buffer object for validation (or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking
@@ -846,15 +861,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* be returned if @use_ticket is set to true.
*/
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_ticket,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
int ret;
WARN_ON(!atomic_read(&bo->kref.refcount));
ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
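A hedged before/after fragment showing the dropped use_ticket parameter (bo and the enclosing function are assumed; passing a NULL ticket now selects the unticketed path, and ttm_bo_unreserve() is the matching release):

int ret;

/* was: ttm_bo_reserve(bo, true, false, false, NULL); */
ret = ttm_bo_reserve(bo, true, false, NULL);
if (ret)
	return ret;
/* ... access the buffer object ... */
ttm_bo_unreserve(bo);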
@@ -948,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
* @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -962,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
bool evict, bool interruptible, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
@@ -970,6 +985,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
* @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -984,7 +1000,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
bool evict, bool interruptible,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
@@ -1002,7 +1019,6 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* @bo: A pointer to a struct ttm_buffer_object.
* @fence: A fence object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Accelerated move function to be called when an accelerated move
@@ -1014,9 +1030,24 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct fence *fence,
bool evict, bool no_wait_gpu,
struct fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
* ttm_bo_pipeline_move.
*
* @bo: A pointer to a struct ttm_buffer_object.
* @fence: A fence object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Function for pipelining accelerated moves. Either frees the memory
* immediately or hangs it on a temporary buffer object.
*/
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
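To show how the two accelerated-move helpers relate, here is a hedged fragment of a driver move path (bo, fence, evict and new_mem are assumed to come from the driver's copy submission; the can_pipeline flag is hypothetical): drivers that can keep copies in flight call ttm_bo_pipeline_move(), others fall back to the waiting cleanup helper.

int ret;

if (can_pipeline)
	/* Free or park the old node without stalling on the fence. */
	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
else
	/* Classic path: wait for the copy before releasing the old node. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);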
/**
* ttm_io_prot
*
@@ -1030,8 +1061,7 @@ extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
/**

View File

@@ -0,0 +1,19 @@
/*
* Copyright (C) 2014, 2016 Antony Pavlov <antonynpavlov@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#ifndef __DT_BINDINGS_ATH79_CLK_H
#define __DT_BINDINGS_ATH79_CLK_H
#define ATH79_CLK_CPU 0
#define ATH79_CLK_DDR 1
#define ATH79_CLK_AHB 2
#define ATH79_CLK_END 3
#endif /* __DT_BINDINGS_ATH79_CLK_H */
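A hedged provider-side sketch of how these indexes are used (the my_* names and rates are made up; clk_register_fixed_rate(), clk_onecell_data and of_clk_add_provider() are standard common-clock-framework APIs). Device trees can then reference the clocks by the same indexes, e.g. ATH79_CLK_AHB:

#include <dt-bindings/clock/ath79-clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>

static struct clk *clks[ATH79_CLK_END];
static struct clk_onecell_data clk_data = {
	.clks = clks,
	.clk_num = ARRAY_SIZE(clks),
};

static void __init my_ath79_clocks_init(struct device_node *np)
{
	/* Rates are illustrative only. */
	clks[ATH79_CLK_CPU] = clk_register_fixed_rate(NULL, "cpu", NULL, 0,
						      560000000);
	clks[ATH79_CLK_DDR] = clk_register_fixed_rate(NULL, "ddr", NULL, 0,
						      480000000);
	clks[ATH79_CLK_AHB] = clk_register_fixed_rate(NULL, "ahb", NULL, 0,
						      240000000);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}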

View File

@@ -0,0 +1,38 @@
/*
* ARTPEC-6 clock controller indexes
*
* Copyright 2016 Axis Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
#define DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
#define ARTPEC6_CLK_CPU 0
#define ARTPEC6_CLK_CPU_PERIPH 1
#define ARTPEC6_CLK_NAND_CLKA 2
#define ARTPEC6_CLK_NAND_CLKB 3
#define ARTPEC6_CLK_ETH_ACLK 4
#define ARTPEC6_CLK_DMA_ACLK 5
#define ARTPEC6_CLK_PTP_REF 6
#define ARTPEC6_CLK_SD_PCLK 7
#define ARTPEC6_CLK_SD_IMCLK 8
#define ARTPEC6_CLK_I2S_HST 9
#define ARTPEC6_CLK_I2S0_CLK 10
#define ARTPEC6_CLK_I2S1_CLK 11
#define ARTPEC6_CLK_UART_PCLK 12
#define ARTPEC6_CLK_UART_REFCLK 13
#define ARTPEC6_CLK_I2C 14
#define ARTPEC6_CLK_SPI_PCLK 15
#define ARTPEC6_CLK_SPI_SSPCLK 16
#define ARTPEC6_CLK_SYS_TIMER 17
#define ARTPEC6_CLK_FRACDIV_IN 18
#define ARTPEC6_CLK_DBG_PCLK 19
/* This must be the highest clock index plus one. */
#define ARTPEC6_CLK_NUMCLOCKS 20
#endif

View File

@@ -44,5 +44,23 @@
#define BCM2835_CLOCK_EMMC 28
#define BCM2835_CLOCK_PERI_IMAGE 29
#define BCM2835_CLOCK_PWM 30
#define BCM2835_CLOCK_PCM 31
#define BCM2835_CLOCK_COUNT 31
#define BCM2835_PLLA_DSI0 32
#define BCM2835_PLLA_CCP2 33
#define BCM2835_PLLD_DSI0 34
#define BCM2835_PLLD_DSI1 35
#define BCM2835_CLOCK_AVEO 36
#define BCM2835_CLOCK_DFT 37
#define BCM2835_CLOCK_GP0 38
#define BCM2835_CLOCK_GP1 39
#define BCM2835_CLOCK_GP2 40
#define BCM2835_CLOCK_SLIM 41
#define BCM2835_CLOCK_SMI 42
#define BCM2835_CLOCK_TEC 43
#define BCM2835_CLOCK_DPI 44
#define BCM2835_CLOCK_CAM0 45
#define BCM2835_CLOCK_CAM1 46
#define BCM2835_CLOCK_DSI0E 47
#define BCM2835_CLOCK_DSI1E 48

View File

@@ -79,6 +79,8 @@
#define CLK_MOUT_CORE 58
#define CLK_MOUT_APLL 59
#define CLK_MOUT_ACLK_266_SUB 60
#define CLK_MOUT_UART2 61
#define CLK_MOUT_MMC2 62
/* Dividers */
#define CLK_DIV_GPL 64
@@ -127,6 +129,9 @@
#define CLK_DIV_CORE 107
#define CLK_DIV_HPM 108
#define CLK_DIV_COPY 109
#define CLK_DIV_UART2 110
#define CLK_DIV_MMC2_PRE 111
#define CLK_DIV_MMC2 112
/* Gates */
#define CLK_ASYNC_G3D 128
@@ -223,6 +228,8 @@
#define CLK_BLOCK_MFC 219
#define CLK_BLOCK_CAM 220
#define CLK_SMIES 221
#define CLK_UART2 222
#define CLK_SDMMC2 223
/* Special clocks */
#define CLK_SCLK_JPEG 224
@@ -249,12 +256,14 @@
#define CLK_SCLK_SPI0 245
#define CLK_SCLK_UART1 246
#define CLK_SCLK_UART0 247
#define CLK_SCLK_UART2 248
#define CLK_SCLK_MMC2 249
/*
* Total number of clocks of main CMU.
* NOTE: Must be equal to last clock ID increased by one.
*/
#define CLK_NR_CLKS 248
#define CLK_NR_CLKS 250
/*
* CMU DMC

View File

@@ -1,33 +1,65 @@
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Copyright (c) 2016 Krzysztof Kozlowski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Device Tree binding constants for Exynos5410 clock controller.
*/
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5410_H
/* core clocks */
#define CLK_FIN_PLL 1
#define CLK_FOUT_APLL 2
#define CLK_FOUT_CPLL 3
#define CLK_FOUT_MPLL 4
#define CLK_FOUT_BPLL 5
#define CLK_FOUT_KPLL 6
/* gate for special clocks (sclk) */
#define CLK_SCLK_UART0 128
#define CLK_SCLK_UART1 129
#define CLK_SCLK_UART2 130
#define CLK_SCLK_UART3 131
#define CLK_SCLK_MMC0 132
#define CLK_SCLK_MMC1 133
#define CLK_SCLK_MMC2 134
#define CLK_SCLK_USBD300 150
#define CLK_SCLK_USBD301 151
#define CLK_SCLK_USBPHY300 152
#define CLK_SCLK_USBPHY301 153
#define CLK_SCLK_PWM 155
/* gate clocks */
#define CLK_UART0 257
#define CLK_UART1 258
#define CLK_UART2 259
#define CLK_I2C0 261
#define CLK_I2C1 262
#define CLK_I2C2 263
#define CLK_I2C3 264
#define CLK_USI0 265
#define CLK_USI1 266
#define CLK_USI2 267
#define CLK_USI3 268
#define CLK_UART3 260
#define CLK_PWM 279
#define CLK_MCT 315
#define CLK_WDT 316
#define CLK_RTC 317
#define CLK_TMU 318
#define CLK_MMC0 351
#define CLK_MMC1 352
#define CLK_MMC2 353
#define CLK_USBH20 365
#define CLK_USBD300 366
#define CLK_USBD301 367
#define CLK_SSS 471
#define CLK_NR_CLKS 512
#define CLK_NR_CLKS 512

Some files were not shown because too many files have changed in this diff.