Merge branches 'arm/exynos', 'arm/omap', 'arm/rockchip', 'arm/mediatek', 'arm/smmu', 'arm/core', 'x86/vt-d', 'x86/amd' and 'core' into next
@@ -762,8 +762,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}

static inline void acpi_dma_configure(struct device *dev,
enum dev_dma_attr attr) { }
static inline int acpi_dma_configure(struct device *dev,
enum dev_dma_attr attr)
{
return 0;
}

static inline void acpi_dma_deconfigure(struct device *dev) { }

@@ -52,7 +52,4 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif

#define IORT_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)

#endif /* __ACPI_IORT_H__ */

@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {

atomic_t nr_active;

struct delayed_work delayed_run_work;
struct delayed_work delay_work;

struct hlist_node cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,

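The blk-mq hunk above adds a per-hctx delayed_run_work and declares blk_mq_delay_run_hw_queue(). A minimal sketch of how a driver's queue_rq path might use it; my_dev, my_dev_has_budget() and my_dev_submit() are hypothetical and not part of this diff:

#include <linux/blk-mq.h>

/* Hypothetical driver state; not part of this commit. */
struct my_dev;
extern bool my_dev_has_budget(struct my_dev *dev);
extern void my_dev_submit(struct my_dev *dev, struct request *rq);

static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;

	if (!my_dev_has_budget(dev)) {
		/* Re-run this hardware queue in ~3 ms instead of spinning. */
		blk_mq_delay_run_hw_queue(hctx, 3);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	my_dev_submit(dev, bd->rq);
	return BLK_MQ_RQ_QUEUE_OK;
}
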
@@ -610,7 +610,6 @@ struct request_queue {
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */

#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -1673,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
return true;
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
static inline bool bio_will_gap(struct request_queue *q,
struct request *prev_rq,
struct bio *prev,
struct bio *next)
{
if (bio_has_data(prev) && queue_virt_boundary(q)) {
struct bio_vec pb, nb;

/*
* don't merge if the 1st bio starts with non-zero
* offset, otherwise it is quite difficult to respect
* sg gap limit. We work hard to merge a huge number of small
* single bios in case of mkfs.
*/
if (prev_rq)
bio_get_first_bvec(prev_rq->bio, &pb);
else
bio_get_first_bvec(prev, &pb);
if (pb.bv_offset)
return true;

/*
* We don't need to worry about the situation that the
* merged segment ends in unaligned virt boundary:
*
* - if 'pb' ends aligned, the merged segment ends aligned
* - if 'pb' ends unaligned, the next bio must include
* one single bvec of 'nb', otherwise the 'nb' can't
* merge with 'pb'
*/
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);

@@ -1691,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, req->biotail, bio);
return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, bio, req->bio);
return bio_will_gap(req->q, NULL, bio, req->bio);
}

int kblockd_schedule_work(struct work_struct *work);

@@ -556,7 +556,7 @@ enum ccp_engine {
* struct ccp_cmd - CCP operation request
* @entry: list element (ccp driver use only)
* @work: work element used for callbacks (ccp driver use only)
* @ccp: CCP device to be run on (ccp driver use only)
* @ccp: CCP device to be run on
* @ret: operation return code (ccp driver use only)
* @flags: cmd processing flags
* @engine: CCP operation to perform

@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}

static inline void cgroup_init_kthreadd(void)
{
/*
* kthreadd is inherited by all kthreads, keep it in the root so
* that the new kthreads are guaranteed to stay in the root until
* initialization is finished.
*/
current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
/*
* This kthread finished initialization. The creator should have
* set PF_NO_SETAFFINITY if this kthread should stay in the root.
*/
current->no_cgroup_migration = 0;
}

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)

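The two helpers above exist so kernel threads forked from kthreadd stay in the root cgroup until their setup is finished. A hedged sketch of the intended call sites (presumably in kernel/kthread.c; the elided bodies are not part of this diff):

/* Sketch only: kthreadd pins itself (and every kthread it forks) to the root. */
int kthreadd(void *unused)
{
	cgroup_init_kthreadd();
	/* ... existing kthreadd creation loop ... */
	return 0;
}

/* Sketch only: each new kthread re-enables cgroup migration once it is set up. */
static int kthread(void *_create)
{
	/* ... bind CPU mask, priority, PF_NO_SETAFFINITY as needed ... */
	cgroup_kthread_ready();
	/* ... run the thread function ... */
	return 0;
}
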
@@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }

#ifdef CONFIG_CLKEVT_PROBE
extern int clockevent_probe(void);
#els
#else
static inline int clockevent_probe(void) { return 0; }
#endif

@@ -20,6 +20,7 @@
#include <asm/errno.h>

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>

@@ -71,6 +72,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

#else

@@ -100,6 +102,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}

static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}

#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */

@@ -728,6 +728,18 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
* Managed DMA API
*/

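dma_configure()/dma_deconfigure() move DMA and IOMMU setup to driver-probe time. A hedged sketch of how a bus probe path might use them; example_bus_probe() is illustrative and not the driver-core code from this merge:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_bus_probe(struct device *dev)
{
	int ret;

	/* May return -EPROBE_DEFER if the IOMMU is not ready yet. */
	ret = dma_configure(dev);
	if (ret)
		return ret;

	ret = dev->driver->probe ? dev->driver->probe(dev) : 0;
	if (ret)
		dma_deconfigure(dev);	/* undo the setup on probe failure */

	return ret;
}
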
@@ -39,6 +39,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
extern int intel_iommu_tboot_noforce;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{

@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
extern void elevator_exit(struct request_queue *, struct elevator_queue *);
extern int elevator_change(struct request_queue *, const char *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,

@@ -20,6 +20,8 @@ struct sock_exterr_skb {
struct sock_extended_err ee;
u16 addr_offset;
__be16 port;
u8 opt_stats:1,
unused:7;
};

#endif

@@ -87,7 +87,6 @@ struct fscrypt_operations {
unsigned int flags;
const char *key_prefix;
int (*get_context)(struct inode *, void *, size_t);
int (*prepare_context)(struct inode *);
int (*set_context)(struct inode *, const void *, size_t, void *);
int (*dummy_context)(struct inode *);
bool (*is_encrypted)(struct inode *);

@@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
struct fwnode_handle *child,
enum gpiod_flags flags,
const char *label);
/* FIXME: delete this helper when users are switched over */
static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
const char *con_id, struct fwnode_handle *child)
{
return devm_fwnode_get_index_gpiod_from_child(dev, con_id,
0, child,
GPIOD_ASIS,
"?");
}

#else /* CONFIG_GPIOLIB */

@@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
return ERR_PTR(-ENOSYS);
}

/* FIXME: delete this when all users are switched over */
static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
const char *con_id, struct fwnode_handle *child)
{
return ERR_PTR(-ENOSYS);
}

#endif /* CONFIG_GPIOLIB */

static inline

@@ -88,6 +88,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
#define HWMON_T_ALARM BIT(hwmon_temp_alarm)
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)

@@ -845,6 +845,13 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;

/*
* Defer freeing channel until after all cpu's have
* gone through grace period.
*/
struct rcu_head rcu;

/*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);

void hv_event_tasklet_disable(struct vmbus_channel *channel);
void hv_event_tasklet_enable(struct vmbus_channel *channel);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);

@@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
struct config_item_type *type)
{
#ifdef CONFIG_CONFIGFS_FS
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&d->group, name, type);
#endif
}

@@ -30,6 +30,8 @@
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

@@ -72,24 +74,8 @@

#define OFFSET_STRIDE (9)

#ifdef CONFIG_64BIT
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#else
static inline u64 dmar_readq(void __iomem *addr)
{
u32 lo, hi;
lo = readl(addr);
hi = readl(addr + 4);
return (((u64) hi) << 32) + lo;
}

static inline void dmar_writeq(void __iomem *addr, u64 val)
{
writel((u32)val, addr);
writel((u32)(val >> 32), addr + 4);
}
#endif

#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)

@@ -19,12 +19,12 @@
#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>

#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -32,10 +32,13 @@
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
* This is to make the IOMMU API setup privileged
* mapppings accessible by the master only at higher
* privileged execution level and inaccessible at
* less privileged levels.
* Where the bus hardware includes a privilege level as part of its access type
* markings, and certain devices are capable of issuing transactions marked as
* either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
* given permission flags only apply to accesses at the higher privilege level,
* and that unprivileged transactions should have as little access as possible.
* This would usually imply the same permissions as kernel mappings on the CPU,
* if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)

@@ -125,9 +128,16 @@ enum iommu_attr {
};

/* These are the possible reserved region types */
#define IOMMU_RESV_DIRECT (1 << 0)
#define IOMMU_RESV_RESERVED (1 << 1)
#define IOMMU_RESV_MSI (1 << 2)
enum iommu_resv_type {
/* Memory regions which must be mapped 1:1 at all times */
IOMMU_RESV_DIRECT,
/* Arbitrary "never map this or give it to a device" address ranges */
IOMMU_RESV_RESERVED,
/* Hardware MSI region (untranslated) */
IOMMU_RESV_MSI,
/* Software-managed MSI translation window */
IOMMU_RESV_SW_MSI,
};

/**
* struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +152,7 @@ struct iommu_resv_region {
phys_addr_t start;
size_t length;
int prot;
int type;
enum iommu_resv_type type;
};

#ifdef CONFIG_IOMMU_API
@@ -288,7 +298,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);

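With reserved-region types now an enum, iommu_alloc_resv_region() takes an enum iommu_resv_type. A minimal sketch of a ->get_resv_regions()-style callback using it; the doorbell address, size, and permission bits below are placeholders, not values from this merge:

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sizes.h>

static void example_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Reserve a software-managed MSI window at a made-up address. */
	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
					 IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}
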
@@ -328,46 +339,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
* @dev: the device where the fault has happened
* @iova: the faulting address
* @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
*
* This function should be called by the low-level IOMMU implementations
* whenever IOMMU faults happen, to allow high-level users, that are
* interested in such events, to know about them.
*
* This event may be useful for several possible use cases:
* - mere logging of the event
* - dynamic TLB/PTE loading
* - if restarting of the faulting device is required
*
* Returns 0 on success and an appropriate error code otherwise (if dynamic
* PTE/TLB loading will one day be supported, implementations will be able
* to tell whether it succeeded or not according to this return value).
*
* Specifically, -ENOSYS is returned if a fault handler isn't installed
* (though fault handlers can also return -ENOSYS, in case they want to
* elicit the default behavior of the IOMMU drivers).
*/
static inline int report_iommu_fault(struct iommu_domain *domain,
struct device *dev, unsigned long iova, int flags)
{
int ret = -ENOSYS;

/*
* if upper layers showed interest and installed a fault handler,
* invoke it.
*/
if (domain->handler)
ret = domain->handler(domain, dev, iova, flags,
domain->handler_token);

trace_io_page_fault(dev, iova, flags);
return ret;
}
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);

static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,

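report_iommu_fault() becomes an out-of-line function here, but callers keep the same shape as the kernel-doc above describes. A hedged sketch of a driver fault IRQ handler; example_read_fault_addr() and the use of a NULL device pointer are illustrative only:

#include <linux/interrupt.h>
#include <linux/iommu.h>

extern unsigned long example_read_fault_addr(void);	/* hypothetical */

static irqreturn_t example_iommu_fault_isr(int irq, void *data)
{
	struct iommu_domain *domain = data;
	unsigned long iova = example_read_fault_addr();

	/* Returns the installed handler's verdict, or -ENOSYS if none is set. */
	if (report_iommu_fault(domain, NULL, iova, IOMMU_FAULT_READ))
		pr_err("unhandled IOMMU fault at %#lx\n", iova);

	return IRQ_HANDLED;
}
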
@@ -82,6 +82,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

@@ -106,5 +107,95 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
bool size_aligned)
{
return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn)
{
return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
unsigned long pfn_lo,
unsigned long pfn_hi)
{
return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
unsigned long start_pfn,
unsigned long pfn_32bit)
{
}

static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova,
unsigned long pfn_lo,
unsigned long pfn_hi)
{
return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
struct iova_domain *iovad)
{
}
#endif

#endif

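The new !CONFIG_IOMMU_IOVA stubs return 0 or NULL, so common code can call the allocator unconditionally and simply treat those values as "no IOVA available". A minimal sketch under that assumption; the function name is illustrative:

#include <linux/iova.h>

static dma_addr_t example_alloc_iova(struct iova_domain *iovad, size_t size,
				     dma_addr_t dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn;

	/* With the stubs this returns 0, which we treat as allocation failure. */
	pfn = alloc_iova_fast(iovad, size >> shift, dma_limit >> shift);
	if (!pfn)
		return 0;

	return (dma_addr_t)pfn << shift;
}
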
@@ -96,6 +96,9 @@
#define GICH_MISR_EOI (1 << 0)
#define GICH_MISR_U (1 << 1)

#define GICV_PMR_PRIORITY_SHIFT 3
#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)

#ifndef __ASSEMBLY__

#include <linux/irqdomain.h>

@@ -76,6 +76,9 @@ size_t ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
size_t kasan_metadata_size(struct kmem_cache *cache);

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);

@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}

static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx,
int nr)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{

@@ -35,10 +35,11 @@
* Max bus-specific overhead incurred by request/responses.
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
* SPI requires up to 32 additional bytes for responses.
* */
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
#define EC_MAX_RESPONSE_OVERHEAD 2
#define EC_MAX_RESPONSE_OVERHEAD 32

/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.

@@ -476,6 +476,7 @@ enum {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
MLX4_INTERFACE_STATE_NOWAIT = 1 << 2,
};

#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \

@@ -32,6 +32,8 @@ struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

@@ -53,7 +53,7 @@ struct sdio_func {
unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */

u8 tmpbuf[4]; /* DMA:able scratch buffer */
u8 *tmpbuf; /* DMA:able scratch buffer */

unsigned num_info; /* number of info strings */
const char **info; /* info strings */

@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pud; \
})

#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
pmd_t ___pmd; \
\
___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
mmu_notifier_invalidate_range(__mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
___pmd; \
})

/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

@@ -64,26 +64,26 @@ enum {
* RDMA_QPTYPE field
*/
enum {
NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */
NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */
NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
};

/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
* RDMA_QPTYPE field
*/
enum {
NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */
NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */
NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */
NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */
NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */
NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
};

/* RDMA Connection Management Service Type codes for Discovery Log Page
* entry TSAS RDMA_CMS field
*/
enum {
NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};

#define NVMF_AQ_DEPTH 32

@@ -55,7 +55,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}

void of_dma_configure(struct device *dev, struct device_node *np);
int of_dma_configure(struct device *dev, struct device_node *np);
void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */

static inline int of_driver_match_device(struct device *dev,
@@ -103,7 +104,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
static inline void of_dma_configure(struct device *dev, struct device_node *np)

static inline int of_dma_configure(struct device *dev, struct device_node *np)
{
return 0;
}
static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */

@@ -76,22 +76,12 @@ struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;

#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
extern int gpmc_nand_init(struct omap_nand_platform_data *d,
struct gpmc_timings *gpmc_t);
#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
#else
static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
struct gpmc_timings *gpmc_t)
#define board_onenand_data NULL
static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
{
return 0;
}
#endif

#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
#else
#define board_onenand_data NULL
static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
{
}
#endif

@@ -145,8 +145,9 @@ struct pinctrl_desc {
extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev);
extern int pinctrl_enable(struct pinctrl_dev *pctldev);

/* Please use pinctrl_register_and_init() instead */
/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);

@@ -12,28 +12,8 @@

#include <linux/platform_device.h>

#define MMU_REG_SIZE 256

/**
* struct iommu_arch_data - omap iommu private data
* @name: name of the iommu device
* @iommu_dev: handle of the iommu device
*
* This is an omap iommu private data object, which binds an iommu user
* to its iommu device. This object should be placed at the iommu user's
* dev_archdata so generic IOMMU API can be used without having to
* utilize omap-specific plumbing anymore.
*/
struct omap_iommu_arch_data {
const char *name;
struct omap_iommu *iommu_dev;
};

struct iommu_platform_data {
const char *name;
const char *reset_name;
int nr_tlb_entries;

int (*assert_reset)(struct platform_device *pdev, const char *name);
int (*deassert_reset)(struct platform_device *pdev, const char *name);
};

@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
bool optional);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared,
bool optional);
void reset_control_put(struct reset_control *rstc);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
@@ -31,31 +34,26 @@ static inline int device_reset_optional(struct device *dev)

static inline int reset_control_reset(struct reset_control *rstc)
{
WARN_ON(1);
return 0;
}

static inline int reset_control_assert(struct reset_control *rstc)
{
WARN_ON(1);
return 0;
}

static inline int reset_control_deassert(struct reset_control *rstc)
{
WARN_ON(1);
return 0;
}

static inline int reset_control_status(struct reset_control *rstc)
{
WARN_ON(1);
return 0;
}

static inline void reset_control_put(struct reset_control *rstc)
{
WARN_ON(1);
}

static inline int __must_check device_reset(struct device *dev)

@@ -74,14 +72,21 @@ static inline struct reset_control *__of_reset_control_get(
const char *id, int index, bool shared,
bool optional)
{
return ERR_PTR(-ENOTSUPP);
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}

static inline struct reset_control *__reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}

static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional)
{
return ERR_PTR(-ENOTSUPP);
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}

#endif /* CONFIG_RESET_CONTROLLER */
@@ -107,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
#endif
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
false);
return __reset_control_get(dev, id, 0, false, false);
}

/**
@@ -136,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
false);
return __reset_control_get(dev, id, 0, true, false);
}

static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
true);
return __reset_control_get(dev, id, 0, false, true);
}

static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
true);
return __reset_control_get(dev, id, 0, true, true);
}

/**

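The reset stubs above drop their WARN_ON()s and the optional lookups now return NULL instead of an error when no reset line is described, so a driver can keep one probe path. A hedged sketch; the device shape and delay values are illustrative only:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;

	/* NULL here simply means "no reset wired up", not an error. */
	rstc = reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* The reset_control_*() calls are expected to accept a NULL handle. */
	reset_control_assert(rstc);
	usleep_range(10, 20);
	reset_control_deassert(rstc);

	return 0;
}
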
@@ -604,6 +604,10 @@ struct task_struct {
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
#endif

unsigned long atomic_flags; /* Flags requiring atomic access. */

@@ -54,15 +54,16 @@ static inline u64 local_clock(void)
}
#else
extern void sched_clock_init_late(void);
/*
* Architectures can set this to 1 if they have specified
* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
* but then during bootup it turns out that sched_clock()
* is reliable after all:
*/
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
* When sched_clock_stable(), __sched_clock_offset provides the offset
* between local_clock() and sched_clock().
*/
extern u64 __sched_clock_offset;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

@@ -26,6 +26,7 @@ struct kstat {
unsigned int nlink;
uint32_t blksize; /* Preferred I/O size */
u64 attributes;
u64 attributes_mask;
#define KSTAT_ATTR_FS_IOC_FLAGS \
(STATX_ATTR_COMPRESSED | \
STATX_ATTR_IMMUTABLE | \

@@ -39,7 +39,10 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
int idx;
struct {
int idx;
int start_idx;
};
};
};

@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,

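iov_iter_revert() lets a caller wind the iterator back after a consumer has advanced it and then failed. A minimal sketch, with example_send() standing in for any routine that copies from the iterator as it goes (it is hypothetical, not part of this merge):

#include <linux/uio.h>

extern ssize_t example_send(struct iov_iter *from);	/* hypothetical consumer */

static ssize_t example_write(struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	ssize_t sent;

	sent = example_send(from);	/* advances 'from' as it copies */
	if (sent < 0) {
		/* Undo whatever example_send() consumed before it failed. */
		iov_iter_revert(from, len - iov_iter_count(from));
		return sent;
	}

	return sent;
}
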
@@ -50,4 +50,10 @@
/* device can't handle Link Power Management */
#define USB_QUIRK_NO_LPM BIT(10)

/*
* Device reports its bInterval as linear frames instead of the
* USB 2.0 calculation.
*/
#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)

#endif /* __LINUX_USB_QUIRKS_H */

@@ -167,6 +167,7 @@ struct virtio_driver {
unsigned int feature_table_size;
const unsigned int *feature_table_legacy;
unsigned int feature_table_size_legacy;
int (*validate)(struct virtio_device *dev);
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);

@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
struct virtio_vsock_hdr hdr;
struct work_struct work;
struct list_head list;
/* socket refcnt not held, only use for cancellation */
struct vsock_sock *vsk;
void *buf;
u32 len;
u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {

struct virtio_vsock_pkt_info {
u32 remote_cid, remote_port;
struct vsock_sock *vsk;
struct msghdr *msg;
u32 pkt_len;
u16 type;