Merge tag 'asoc-v5.7' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Updates for v5.7 This is a very big update for the core since Morimoto-san has been rather busy continuing his refactorings to clean up a lot of the cruft that we have accumulated over the years. We've also gained several new drivers, including initial (but still not complete) parts of the Intel SoundWire support. - Lots of refactorings to modernize the code from Morimoto-san. - Conversion of SND_SOC_ALL_CODECS to use imply from Geert Uytterhoeven. - Continued refactoring and fixing of the Intel support. - Soundwire and more advanced clocking support for Realtek RT5682. - Support for amlogic GX, Meson 8, Meson 8B and T9015 DAC, Broadcom DSL/PON, Ingenic JZ4760 and JZ4770, Realtek RL6231, and TI TAS2563 and TLV320ADCX140.
This commit is contained in:
@@ -524,7 +524,7 @@ struct request_queue {
|
||||
unsigned int sg_reserved_size;
|
||||
int node;
|
||||
#ifdef CONFIG_BLK_DEV_IO_TRACE
|
||||
struct blk_trace *blk_trace;
|
||||
struct blk_trace __rcu *blk_trace;
|
||||
struct mutex blk_trace_mutex;
|
||||
#endif
|
||||
/*
|
||||
@@ -1494,7 +1494,6 @@ static inline void put_dev_sector(Sector p)
|
||||
}
|
||||
|
||||
int kblockd_schedule_work(struct work_struct *work);
|
||||
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
|
||||
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
|
||||
|
||||
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
|
||||
|
@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
|
||||
**/
|
||||
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
|
||||
do { \
|
||||
struct blk_trace *bt = (q)->blk_trace; \
|
||||
struct blk_trace *bt; \
|
||||
\
|
||||
rcu_read_lock(); \
|
||||
bt = rcu_dereference((q)->blk_trace); \
|
||||
if (unlikely(bt)) \
|
||||
__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
|
||||
rcu_read_unlock(); \
|
||||
} while (0)
|
||||
#define blk_add_trace_msg(q, fmt, ...) \
|
||||
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
|
||||
@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
|
||||
|
||||
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
|
||||
{
|
||||
struct blk_trace *bt = q->blk_trace;
|
||||
if (likely(!bt))
|
||||
return false;
|
||||
return bt->act_mask & BLK_TC_NOTIFY;
|
||||
struct blk_trace *bt;
|
||||
bool ret;
|
||||
|
||||
rcu_read_lock();
|
||||
bt = rcu_dereference(q->blk_trace);
|
||||
ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
|
||||
|
@@ -10,6 +10,9 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
|
||||
#define BOOTCONFIG_MAGIC_LEN 12
|
||||
|
||||
/* XBC tree node */
|
||||
struct xbc_node {
|
||||
u16 next;
|
||||
|
@@ -62,6 +62,7 @@ struct css_task_iter {
|
||||
struct list_head *mg_tasks_head;
|
||||
struct list_head *dying_tasks_head;
|
||||
|
||||
struct list_head *cur_tasks_head;
|
||||
struct css_set *cur_cset;
|
||||
struct css_set *cur_dcset;
|
||||
struct task_struct *cur_task;
|
||||
|
@@ -248,15 +248,6 @@ typedef struct compat_siginfo {
|
||||
} _sifields;
|
||||
} compat_siginfo_t;
|
||||
|
||||
/*
|
||||
* These functions operate on 32- or 64-bit specs depending on
|
||||
* COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
|
||||
*/
|
||||
extern int compat_get_timespec(struct timespec *, const void __user *);
|
||||
extern int compat_put_timespec(const struct timespec *, void __user *);
|
||||
extern int compat_get_timeval(struct timeval *, const void __user *);
|
||||
extern int compat_put_timeval(const struct timeval *, void __user *);
|
||||
|
||||
struct compat_iovec {
|
||||
compat_uptr_t iov_base;
|
||||
compat_size_t iov_len;
|
||||
@@ -416,26 +407,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginf
|
||||
int get_compat_sigevent(struct sigevent *event,
|
||||
const struct compat_sigevent __user *u_event);
|
||||
|
||||
static inline int old_timeval32_compare(struct old_timeval32 *lhs,
|
||||
struct old_timeval32 *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_usec - rhs->tv_usec;
|
||||
}
|
||||
|
||||
static inline int old_timespec32_compare(struct old_timespec32 *lhs,
|
||||
struct old_timespec32 *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_nsec - rhs->tv_nsec;
|
||||
}
|
||||
|
||||
extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
|
||||
|
||||
/*
|
||||
|
@@ -201,9 +201,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
|
||||
return cpumask_weight(policy->cpus) > 1;
|
||||
}
|
||||
|
||||
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
|
||||
extern struct kobject *cpufreq_global_kobject;
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
unsigned int cpufreq_get(unsigned int cpu);
|
||||
unsigned int cpufreq_quick_get(unsigned int cpu);
|
||||
|
@@ -129,11 +129,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
|
||||
sectors);
|
||||
}
|
||||
|
||||
static inline struct dax_device *fs_dax_get_by_host(const char *host)
|
||||
{
|
||||
return dax_get_by_host(host);
|
||||
}
|
||||
|
||||
static inline void fs_put_dax(struct dax_device *dax_dev)
|
||||
{
|
||||
put_dax(dax_dev);
|
||||
@@ -141,7 +136,7 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
|
||||
|
||||
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
|
||||
int dax_writeback_mapping_range(struct address_space *mapping,
|
||||
struct block_device *bdev, struct writeback_control *wbc);
|
||||
struct dax_device *dax_dev, struct writeback_control *wbc);
|
||||
|
||||
struct page *dax_layout_busy_page(struct address_space *mapping);
|
||||
dax_entry_t dax_lock_page(struct page *page);
|
||||
@@ -160,11 +155,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct dax_device *fs_dax_get_by_host(const char *host)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void fs_put_dax(struct dax_device *dax_dev)
|
||||
{
|
||||
}
|
||||
@@ -180,7 +170,7 @@ static inline struct page *dax_layout_busy_page(struct address_space *mapping)
|
||||
}
|
||||
|
||||
static inline int dax_writeback_mapping_range(struct address_space *mapping,
|
||||
struct block_device *bdev, struct writeback_control *wbc)
|
||||
struct dax_device *dax_dev, struct writeback_control *wbc)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@@ -127,9 +127,9 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
struct debugfs_blob_wrapper *blob);
|
||||
|
||||
struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
struct debugfs_regset32 *regset);
|
||||
void debugfs_create_regset32(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
struct debugfs_regset32 *regset);
|
||||
|
||||
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
|
||||
int nregs, void __iomem *base, char *prefix);
|
||||
@@ -304,11 +304,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_regset32(const char *name,
|
||||
umode_t mode, struct dentry *parent,
|
||||
struct debugfs_regset32 *regset)
|
||||
static inline void debugfs_create_regset32(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
struct debugfs_regset32 *regset)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
|
||||
|
@@ -798,6 +798,17 @@ static inline struct device_node *dev_of_node(struct device *dev)
|
||||
return dev->of_node;
|
||||
}
|
||||
|
||||
static inline bool dev_has_sync_state(struct device *dev)
|
||||
{
|
||||
if (!dev)
|
||||
return false;
|
||||
if (dev->driver && dev->driver->sync_state)
|
||||
return true;
|
||||
if (dev->bus && dev->bus->sync_state)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* High level routines for use by the bus drivers
|
||||
*/
|
||||
|
@@ -69,19 +69,23 @@ struct dmar_pci_notify_info {
|
||||
extern struct rw_semaphore dmar_global_lock;
|
||||
extern struct list_head dmar_drhd_units;
|
||||
|
||||
#define for_each_drhd_unit(drhd) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
|
||||
#define for_each_drhd_unit(drhd) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
|
||||
dmar_rcu_check())
|
||||
|
||||
#define for_each_active_drhd_unit(drhd) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
|
||||
dmar_rcu_check()) \
|
||||
if (drhd->ignored) {} else
|
||||
|
||||
#define for_each_active_iommu(i, drhd) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
|
||||
dmar_rcu_check()) \
|
||||
if (i=drhd->iommu, drhd->ignored) {} else
|
||||
|
||||
#define for_each_iommu(i, drhd) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
|
||||
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
|
||||
dmar_rcu_check()) \
|
||||
if (i=drhd->iommu, 0) {} else
|
||||
|
||||
static inline bool dmar_rcu_check(void)
|
||||
|
@@ -85,6 +85,7 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
|
||||
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
|
||||
extern void set_close_on_exec(unsigned int fd, int flag);
|
||||
extern bool get_close_on_exec(unsigned int fd);
|
||||
extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
|
||||
extern int get_unused_fd_flags(unsigned flags);
|
||||
extern void put_unused_fd(unsigned int fd);
|
||||
|
||||
|
@@ -698,6 +698,7 @@ struct inode {
|
||||
struct rcu_head i_rcu;
|
||||
};
|
||||
atomic64_t i_version;
|
||||
atomic64_t i_sequence; /* see futex */
|
||||
atomic_t i_count;
|
||||
atomic_t i_dio_count;
|
||||
atomic_t i_writecount;
|
||||
|
@@ -31,23 +31,26 @@ struct task_struct;
|
||||
|
||||
union futex_key {
|
||||
struct {
|
||||
u64 i_seq;
|
||||
unsigned long pgoff;
|
||||
struct inode *inode;
|
||||
int offset;
|
||||
unsigned int offset;
|
||||
} shared;
|
||||
struct {
|
||||
union {
|
||||
struct mm_struct *mm;
|
||||
u64 __tmp;
|
||||
};
|
||||
unsigned long address;
|
||||
struct mm_struct *mm;
|
||||
int offset;
|
||||
unsigned int offset;
|
||||
} private;
|
||||
struct {
|
||||
u64 ptr;
|
||||
unsigned long word;
|
||||
void *ptr;
|
||||
int offset;
|
||||
unsigned int offset;
|
||||
} both;
|
||||
};
|
||||
|
||||
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
|
||||
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
|
||||
|
||||
#ifdef CONFIG_FUTEX
|
||||
enum {
|
||||
|
@@ -245,18 +245,6 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
|
||||
!(disk->flags & GENHD_FL_NO_PART_SCAN);
|
||||
}
|
||||
|
||||
static inline bool disk_has_partitions(struct gendisk *disk)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
rcu_read_lock();
|
||||
if (rcu_dereference(disk->part_tbl)->len > 1)
|
||||
ret = true;
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline dev_t disk_devt(struct gendisk *disk)
|
||||
{
|
||||
return MKDEV(disk->major, disk->first_minor);
|
||||
@@ -298,6 +286,7 @@ extern void disk_part_iter_exit(struct disk_part_iter *piter);
|
||||
|
||||
extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
|
||||
sector_t sector);
|
||||
bool disk_has_partitions(struct gendisk *disk);
|
||||
|
||||
/*
|
||||
* Macros to operate on percpu disk statistics:
|
||||
|
@@ -492,7 +492,7 @@ struct hid_report_enum {
|
||||
};
|
||||
|
||||
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
|
||||
#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
|
||||
#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
|
||||
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
|
||||
#define HID_OUTPUT_FIFO_SIZE 64
|
||||
|
||||
|
@@ -22,12 +22,22 @@ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
|
||||
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
|
||||
unsigned int data_len);
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_NAT)
|
||||
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
|
||||
#else
|
||||
#define icmpv6_ndo_send icmpv6_send
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
static inline void icmpv6_send(struct sk_buff *skb,
|
||||
u8 type, u8 code, __u32 info)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void icmpv6_ndo_send(struct sk_buff *skb,
|
||||
u8 type, u8 code, __u32 info)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@@ -2,15 +2,10 @@
|
||||
#ifndef _INET_DIAG_H_
|
||||
#define _INET_DIAG_H_ 1
|
||||
|
||||
#include <net/netlink.h>
|
||||
#include <uapi/linux/inet_diag.h>
|
||||
|
||||
struct net;
|
||||
struct sock;
|
||||
struct inet_hashinfo;
|
||||
struct nlattr;
|
||||
struct nlmsghdr;
|
||||
struct sk_buff;
|
||||
struct netlink_callback;
|
||||
|
||||
struct inet_diag_handler {
|
||||
void (*dump)(struct sk_buff *skb,
|
||||
@@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
|
||||
|
||||
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
|
||||
|
||||
static inline size_t inet_diag_msg_attrs_size(void)
|
||||
{
|
||||
return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
|
||||
+ nla_total_size(1) /* INET_DIAG_TOS */
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
+ nla_total_size(1) /* INET_DIAG_TCLASS */
|
||||
+ nla_total_size(1) /* INET_DIAG_SKV6ONLY */
|
||||
#endif
|
||||
+ nla_total_size(4) /* INET_DIAG_MARK */
|
||||
+ nla_total_size(4); /* INET_DIAG_CLASS_ID */
|
||||
}
|
||||
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
|
||||
struct inet_diag_msg *r, int ext,
|
||||
struct user_namespace *user_ns, bool net_admin);
|
||||
|
@@ -123,6 +123,8 @@
|
||||
|
||||
#define dmar_readq(a) readq(a)
|
||||
#define dmar_writeq(a,v) writeq(v,a)
|
||||
#define dmar_readl(a) readl(a)
|
||||
#define dmar_writel(a, v) writel(v, a)
|
||||
|
||||
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
|
||||
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
|
||||
|
@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
|
||||
BUG();
|
||||
}
|
||||
|
||||
static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
|
||||
static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@@ -192,7 +192,7 @@ enum {
|
||||
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
|
||||
|
||||
/* Irq domain name was allocated in __irq_domain_add() */
|
||||
IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
|
||||
IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
|
||||
|
||||
/* Irq domain is an IPI domain with virq per cpu */
|
||||
IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
|
||||
|
@@ -66,33 +66,15 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
|
||||
*/
|
||||
#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
|
||||
|
||||
/* convert a timespec to ktime_t format: */
|
||||
static inline ktime_t timespec_to_ktime(struct timespec ts)
|
||||
{
|
||||
return ktime_set(ts.tv_sec, ts.tv_nsec);
|
||||
}
|
||||
|
||||
/* convert a timespec64 to ktime_t format: */
|
||||
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
|
||||
{
|
||||
return ktime_set(ts.tv_sec, ts.tv_nsec);
|
||||
}
|
||||
|
||||
/* convert a timeval to ktime_t format: */
|
||||
static inline ktime_t timeval_to_ktime(struct timeval tv)
|
||||
{
|
||||
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
|
||||
}
|
||||
|
||||
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
|
||||
#define ktime_to_timespec(kt) ns_to_timespec((kt))
|
||||
|
||||
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
|
||||
#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
|
||||
|
||||
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
|
||||
#define ktime_to_timeval(kt) ns_to_timeval((kt))
|
||||
|
||||
/* Convert ktime_t to nanoseconds */
|
||||
static inline s64 ktime_to_ns(const ktime_t kt)
|
||||
{
|
||||
@@ -215,25 +197,6 @@ static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
|
||||
|
||||
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
|
||||
|
||||
/**
|
||||
* ktime_to_timespec_cond - convert a ktime_t variable to timespec
|
||||
* format only if the variable contains data
|
||||
* @kt: the ktime_t variable to convert
|
||||
* @ts: the timespec variable to store the result in
|
||||
*
|
||||
* Return: %true if there was a successful conversion, %false if kt was 0.
|
||||
*/
|
||||
static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
|
||||
struct timespec *ts)
|
||||
{
|
||||
if (kt) {
|
||||
*ts = ktime_to_timespec(kt);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
|
||||
* format only if the variable contains data
|
||||
|
@@ -889,6 +889,8 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
|
||||
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
|
||||
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
|
||||
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
|
||||
int kvm_arch_post_init_vm(struct kvm *kvm);
|
||||
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
|
||||
|
||||
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
|
||||
/*
|
||||
@@ -1342,7 +1344,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
|
||||
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
|
||||
|
||||
struct kvm_vcpu *kvm_get_running_vcpu(void);
|
||||
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
|
||||
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
|
||||
bool kvm_arch_has_irq_bypass(void);
|
||||
|
@@ -688,7 +688,10 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
|
||||
u8 nic_rx_multi_path_tirs[0x1];
|
||||
u8 nic_rx_multi_path_tirs_fts[0x1];
|
||||
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
|
||||
u8 reserved_at_3[0x1d];
|
||||
u8 reserved_at_3[0x4];
|
||||
u8 sw_owner_reformat_supported[0x1];
|
||||
u8 reserved_at_8[0x18];
|
||||
|
||||
u8 encap_general_header[0x1];
|
||||
u8 reserved_at_21[0xa];
|
||||
u8 log_max_packet_reformat_context[0x5];
|
||||
|
@@ -2715,6 +2715,10 @@ static inline bool debug_pagealloc_enabled_static(void)
|
||||
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
|
||||
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
|
||||
|
||||
/*
|
||||
* When called in DEBUG_PAGEALLOC context, the call should most likely be
|
||||
* guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
|
||||
*/
|
||||
static inline void
|
||||
kernel_map_pages(struct page *page, int numpages, int enable)
|
||||
{
|
||||
|
@@ -333,6 +333,7 @@ struct mmc_host {
|
||||
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
|
||||
MMC_CAP_UHS_DDR50)
|
||||
#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
|
||||
#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */
|
||||
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
|
||||
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
|
||||
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
|
||||
|
@@ -72,6 +72,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
|
||||
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
|
||||
#define NET_RX_DROP 1 /* packet dropped */
|
||||
|
||||
#define MAX_NEST_DEV 8
|
||||
|
||||
/*
|
||||
* Transmit return codes: transmit return codes originate from three different
|
||||
* namespaces:
|
||||
@@ -1616,6 +1618,7 @@ enum netdev_priv_flags {
|
||||
* and drivers will need to set them appropriately.
|
||||
*
|
||||
* @mpls_features: Mask of features inheritable by MPLS
|
||||
* @gso_partial_features: value(s) from NETIF_F_GSO\*
|
||||
*
|
||||
* @ifindex: interface index
|
||||
* @group: The group the device belongs to
|
||||
@@ -1640,8 +1643,11 @@ enum netdev_priv_flags {
|
||||
* @netdev_ops: Includes several pointers to callbacks,
|
||||
* if one wants to override the ndo_*() functions
|
||||
* @ethtool_ops: Management operations
|
||||
* @l3mdev_ops: Layer 3 master device operations
|
||||
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
|
||||
* discovery handling. Necessary for e.g. 6LoWPAN.
|
||||
* @xfrmdev_ops: Transformation offload operations
|
||||
* @tlsdev_ops: Transport Layer Security offload operations
|
||||
* @header_ops: Includes callbacks for creating,parsing,caching,etc
|
||||
* of Layer 2 headers.
|
||||
*
|
||||
@@ -1680,6 +1686,7 @@ enum netdev_priv_flags {
|
||||
* @dev_port: Used to differentiate devices that share
|
||||
* the same function
|
||||
* @addr_list_lock: XXX: need comments on this one
|
||||
* @name_assign_type: network interface name assignment type
|
||||
* @uc_promisc: Counter that indicates promiscuous mode
|
||||
* has been enabled due to the need to listen to
|
||||
* additional unicast addresses in a device that
|
||||
@@ -1702,6 +1709,9 @@ enum netdev_priv_flags {
|
||||
* @ip6_ptr: IPv6 specific data
|
||||
* @ax25_ptr: AX.25 specific data
|
||||
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
|
||||
* @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
|
||||
* device struct
|
||||
* @mpls_ptr: mpls_dev struct pointer
|
||||
*
|
||||
* @dev_addr: Hw address (before bcast,
|
||||
* because most packets are unicast)
|
||||
@@ -1710,6 +1720,8 @@ enum netdev_priv_flags {
|
||||
* @num_rx_queues: Number of RX queues
|
||||
* allocated at register_netdev() time
|
||||
* @real_num_rx_queues: Number of RX queues currently active in device
|
||||
* @xdp_prog: XDP sockets filter program pointer
|
||||
* @gro_flush_timeout: timeout for GRO layer in NAPI
|
||||
*
|
||||
* @rx_handler: handler for received packets
|
||||
* @rx_handler_data: XXX: need comments on this one
|
||||
@@ -1731,10 +1743,14 @@ enum netdev_priv_flags {
|
||||
* @qdisc: Root qdisc from userspace point of view
|
||||
* @tx_queue_len: Max frames per queue allowed
|
||||
* @tx_global_lock: XXX: need comments on this one
|
||||
* @xdp_bulkq: XDP device bulk queue
|
||||
* @xps_cpus_map: all CPUs map for XPS device
|
||||
* @xps_rxqs_map: all RXQs map for XPS device
|
||||
*
|
||||
* @xps_maps: XXX: need comments on this one
|
||||
* @miniq_egress: clsact qdisc specific data for
|
||||
* egress processing
|
||||
* @qdisc_hash: qdisc hash table
|
||||
* @watchdog_timeo: Represents the timeout that is used by
|
||||
* the watchdog (see dev_watchdog())
|
||||
* @watchdog_timer: List of timers
|
||||
@@ -3548,7 +3564,7 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
|
||||
}
|
||||
|
||||
/**
|
||||
* netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
|
||||
* netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
|
||||
* @n: CPU/Rx queue index
|
||||
* @src1p: the first CPUs/Rx queues mask pointer
|
||||
* @src2p: the second CPUs/Rx queues mask pointer
|
||||
@@ -4375,11 +4391,8 @@ void *netdev_lower_get_next(struct net_device *dev,
|
||||
ldev; \
|
||||
ldev = netdev_lower_get_next(dev, &(iter)))
|
||||
|
||||
struct net_device *netdev_all_lower_get_next(struct net_device *dev,
|
||||
struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
|
||||
struct list_head **iter);
|
||||
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
|
||||
struct list_head **iter);
|
||||
|
||||
int netdev_walk_all_lower_dev(struct net_device *dev,
|
||||
int (*fn)(struct net_device *lower_dev,
|
||||
void *data),
|
||||
|
@@ -121,6 +121,7 @@ struct ip_set_ext {
|
||||
u32 timeout;
|
||||
u8 packets_op;
|
||||
u8 bytes_op;
|
||||
bool target;
|
||||
};
|
||||
|
||||
struct ip_set;
|
||||
@@ -187,6 +188,14 @@ struct ip_set_type_variant {
|
||||
/* Return true if "b" set is the same as "a"
|
||||
* according to the create set parameters */
|
||||
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
|
||||
/* Region-locking is used */
|
||||
bool region_lock;
|
||||
};
|
||||
|
||||
struct ip_set_region {
|
||||
spinlock_t lock; /* Region lock */
|
||||
size_t ext_size; /* Size of the dynamic extensions */
|
||||
u32 elements; /* Number of elements vs timeout */
|
||||
};
|
||||
|
||||
/* The core set type structure */
|
||||
@@ -501,7 +510,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
|
||||
}
|
||||
|
||||
#define IP_SET_INIT_KEXT(skb, opt, set) \
|
||||
{ .bytes = (skb)->len, .packets = 1, \
|
||||
{ .bytes = (skb)->len, .packets = 1, .target = true,\
|
||||
.timeout = ip_set_adt_opt_timeout(opt, set) }
|
||||
|
||||
#define IP_SET_INIT_UEXT(set) \
|
||||
|
@@ -337,35 +337,17 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
|
||||
return NFS_SERVER(inode)->caps & cap;
|
||||
}
|
||||
|
||||
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
|
||||
{
|
||||
dentry->d_time = verf;
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs_save_change_attribute - Returns the inode attribute change cookie
|
||||
* @dir - pointer to parent directory inode
|
||||
* The "change attribute" is updated every time we finish an operation
|
||||
* that will result in a metadata change on the server.
|
||||
* The "cache change attribute" is updated when we need to revalidate
|
||||
* our dentry cache after a directory was seen to change on the server.
|
||||
*/
|
||||
static inline unsigned long nfs_save_change_attribute(struct inode *dir)
|
||||
{
|
||||
return NFS_I(dir)->cache_change_attribute;
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs_verify_change_attribute - Detects NFS remote directory changes
|
||||
* @dir - pointer to parent directory inode
|
||||
* @chattr - previously saved change attribute
|
||||
* Return "false" if the verifiers doesn't match the change attribute.
|
||||
* This would usually indicate that the directory contents have changed on
|
||||
* the server, and that any dentries need revalidating.
|
||||
*/
|
||||
static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
|
||||
{
|
||||
return chattr == NFS_I(dir)->cache_change_attribute;
|
||||
}
|
||||
|
||||
/*
|
||||
* linux/fs/nfs/inode.c
|
||||
*/
|
||||
@@ -495,6 +477,10 @@ extern const struct file_operations nfs_dir_operations;
|
||||
extern const struct dentry_operations nfs_dentry_operations;
|
||||
|
||||
extern void nfs_force_lookup_revalidate(struct inode *dir);
|
||||
extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
|
||||
#if IS_ENABLED(CONFIG_NFS_V4)
|
||||
extern void nfs_clear_verifier_delegated(struct inode *inode);
|
||||
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
|
||||
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
|
||||
struct nfs_fh *fh, struct nfs_fattr *fattr,
|
||||
struct nfs4_label *label);
|
||||
|
@@ -11,17 +11,17 @@ struct of_device_id;
|
||||
|
||||
#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
|
||||
|
||||
unsigned int of_clk_get_parent_count(struct device_node *np);
|
||||
const char *of_clk_get_parent_name(struct device_node *np, int index);
|
||||
unsigned int of_clk_get_parent_count(const struct device_node *np);
|
||||
const char *of_clk_get_parent_name(const struct device_node *np, int index);
|
||||
void of_clk_init(const struct of_device_id *matches);
|
||||
|
||||
#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
|
||||
|
||||
static inline unsigned int of_clk_get_parent_count(struct device_node *np)
|
||||
static inline unsigned int of_clk_get_parent_count(const struct device_node *np)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline const char *of_clk_get_parent_name(struct device_node *np,
|
||||
static inline const char *of_clk_get_parent_name(const struct device_node *np,
|
||||
int index)
|
||||
{
|
||||
return NULL;
|
||||
|
@@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
|
||||
|
||||
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
|
||||
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
|
||||
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
|
||||
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
|
||||
PAGEFLAG(Referenced, referenced, PF_HEAD)
|
||||
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
|
||||
__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
|
||||
|
@@ -357,6 +357,7 @@ struct macsec_ops;
|
||||
* is_gigabit_capable: Set to true if PHY supports 1000Mbps
|
||||
* has_fixups: Set to true if this phy has fixups/quirks.
|
||||
* suspended: Set to true if this phy has been suspended successfully.
|
||||
* suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
|
||||
* sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
|
||||
* loopback_enabled: Set true if this phy has been loopbacked successfully.
|
||||
* state: state of the PHY for management purposes
|
||||
@@ -396,6 +397,7 @@ struct phy_device {
|
||||
unsigned is_gigabit_capable:1;
|
||||
unsigned has_fixups:1;
|
||||
unsigned suspended:1;
|
||||
unsigned suspended_by_mdio_bus:1;
|
||||
unsigned sysfs_links:1;
|
||||
unsigned loopback_enabled:1;
|
||||
|
||||
@@ -557,6 +559,7 @@ struct phy_driver {
|
||||
/*
|
||||
* Checks if the PHY generated an interrupt.
|
||||
* For multi-PHY devices with shared PHY interrupt pin
|
||||
* Set interrupt bits have to be cleared.
|
||||
*/
|
||||
int (*did_interrupt)(struct phy_device *phydev);
|
||||
|
||||
|
@@ -29,7 +29,8 @@ struct pipe_buffer {
|
||||
/**
|
||||
* struct pipe_inode_info - a linux kernel pipe
|
||||
* @mutex: mutex protecting the whole thing
|
||||
* @wait: reader/writer wait point in case of empty/full pipe
|
||||
* @rd_wait: reader wait point in case of empty pipe
|
||||
* @wr_wait: writer wait point in case of full pipe
|
||||
* @head: The point of buffer production
|
||||
* @tail: The point of buffer consumption
|
||||
* @max_usage: The maximum number of slots that may be used in the ring
|
||||
|
@@ -11,6 +11,7 @@ struct omap2_mcspi_platform_config {
|
||||
unsigned short num_cs;
|
||||
unsigned int regs_offset;
|
||||
unsigned int pin_dir:1;
|
||||
size_t max_xfer_len;
|
||||
};
|
||||
|
||||
struct omap2_mcspi_device_config {
|
||||
|
@@ -24,7 +24,7 @@ struct platform_device {
|
||||
int id;
|
||||
bool id_auto;
|
||||
struct device dev;
|
||||
u64 dma_mask;
|
||||
u64 platform_dma_mask;
|
||||
u32 num_resources;
|
||||
struct resource *resource;
|
||||
|
||||
|
@@ -145,6 +145,13 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
|
||||
}
|
||||
}
|
||||
|
||||
/* after that hlist_nulls_del will work */
|
||||
static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
|
||||
{
|
||||
n->pprev = &n->next;
|
||||
n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
|
@@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key(
|
||||
/**
|
||||
* rhashtable_lookup_get_insert_key - lookup and insert object into hash table
|
||||
* @ht: hash table
|
||||
* @key: key
|
||||
* @obj: pointer to hash head inside object
|
||||
* @params: hash table parameters
|
||||
* @data: pointer to element data already in hashes
|
||||
*
|
||||
* Just like rhashtable_lookup_insert_key(), but this function returns the
|
||||
* object if it exists, NULL if it does not and the insertion was successful,
|
||||
|
@@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
void calc_load_nohz_start(void);
|
||||
void calc_load_nohz_remote(struct rq *rq);
|
||||
void calc_load_nohz_stop(void);
|
||||
#else
|
||||
static inline void calc_load_nohz_start(void) { }
|
||||
static inline void calc_load_nohz_remote(struct rq *rq) { }
|
||||
static inline void calc_load_nohz_stop(void) { }
|
||||
#endif /* CONFIG_NO_HZ_COMMON */
|
||||
|
||||
|
@@ -611,9 +611,15 @@ typedef unsigned char *sk_buff_data_t;
|
||||
* @next: Next buffer in list
|
||||
* @prev: Previous buffer in list
|
||||
* @tstamp: Time we arrived/left
|
||||
* @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
|
||||
* for retransmit timer
|
||||
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
|
||||
* @list: queue head
|
||||
* @sk: Socket we are owned by
|
||||
* @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
|
||||
* fragmentation management
|
||||
* @dev: Device we arrived on/are leaving by
|
||||
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
|
||||
* @cb: Control buffer. Free for use by every layer. Put private vars here
|
||||
* @_skb_refdst: destination entry (with norefcount bit)
|
||||
* @sp: the security path, used for xfrm
|
||||
@@ -632,6 +638,9 @@ typedef unsigned char *sk_buff_data_t;
|
||||
* @pkt_type: Packet class
|
||||
* @fclone: skbuff clone status
|
||||
* @ipvs_property: skbuff is owned by ipvs
|
||||
* @inner_protocol_type: whether the inner protocol is
|
||||
* ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
|
||||
* @remcsum_offload: remote checksum offload is enabled
|
||||
* @offload_fwd_mark: Packet was L2-forwarded in hardware
|
||||
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
|
||||
* @tc_skip_classify: do not classify packet. set by IFB device
|
||||
@@ -650,6 +659,8 @@ typedef unsigned char *sk_buff_data_t;
|
||||
* @tc_index: Traffic control index
|
||||
* @hash: the packet hash
|
||||
* @queue_mapping: Queue mapping for multiqueue devices
|
||||
* @head_frag: skb was allocated from page fragments,
|
||||
* not allocated by kmalloc() or vmalloc().
|
||||
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
|
||||
* @active_extensions: active extensions (skb_ext_id types)
|
||||
* @ndisc_nodetype: router type (from link layer)
|
||||
@@ -660,15 +671,28 @@ typedef unsigned char *sk_buff_data_t;
|
||||
* @wifi_acked_valid: wifi_acked was set
|
||||
* @wifi_acked: whether frame was acked on wifi or not
|
||||
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
|
||||
* @encapsulation: indicates the inner headers in the skbuff are valid
|
||||
* @encap_hdr_csum: software checksum is needed
|
||||
* @csum_valid: checksum is already valid
|
||||
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
|
||||
* @csum_complete_sw: checksum was completed by software
|
||||
* @csum_level: indicates the number of consecutive checksums found in
|
||||
* the packet minus one that have been verified as
|
||||
* CHECKSUM_UNNECESSARY (max 3)
|
||||
* @dst_pending_confirm: need to confirm neighbour
|
||||
* @decrypted: Decrypted SKB
|
||||
* @napi_id: id of the NAPI struct this skb came from
|
||||
* @sender_cpu: (aka @napi_id) source CPU in XPS
|
||||
* @secmark: security marking
|
||||
* @mark: Generic packet mark
|
||||
* @reserved_tailroom: (aka @mark) number of bytes of free space available
|
||||
* at the tail of an sk_buff
|
||||
* @vlan_present: VLAN tag is present
|
||||
* @vlan_proto: vlan encapsulation protocol
|
||||
* @vlan_tci: vlan tag control information
|
||||
* @inner_protocol: Protocol (encapsulation)
|
||||
* @inner_ipproto: (aka @inner_protocol) stores ipproto when
|
||||
* skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
|
||||
* @inner_transport_header: Inner transport layer header (encapsulation)
|
||||
* @inner_network_header: Network layer header (encapsulation)
|
||||
* @inner_mac_header: Link layer header (encapsulation)
|
||||
@@ -750,7 +774,9 @@ struct sk_buff {
|
||||
#endif
|
||||
#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
|
||||
|
||||
/* private: */
|
||||
__u8 __cloned_offset[0];
|
||||
/* public: */
|
||||
__u8 cloned:1,
|
||||
nohdr:1,
|
||||
fclone:2,
|
||||
@@ -775,7 +801,9 @@ struct sk_buff {
|
||||
#endif
|
||||
#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
|
||||
|
||||
/* private: */
|
||||
__u8 __pkt_type_offset[0];
|
||||
/* public: */
|
||||
__u8 pkt_type:3;
|
||||
__u8 ignore_df:1;
|
||||
__u8 nf_trace:1;
|
||||
@@ -798,7 +826,9 @@ struct sk_buff {
|
||||
#define PKT_VLAN_PRESENT_BIT 0
|
||||
#endif
|
||||
#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
|
||||
/* private: */
|
||||
__u8 __pkt_vlan_present_offset[0];
|
||||
/* public: */
|
||||
__u8 vlan_present:1;
|
||||
__u8 csum_complete_sw:1;
|
||||
__u8 csum_level:2;
|
||||
|
@@ -401,7 +401,8 @@ extern int __sys_sendto(int fd, void __user *buff, size_t len,
|
||||
int addr_len);
|
||||
extern int __sys_accept4_file(struct file *file, unsigned file_flags,
|
||||
struct sockaddr __user *upeer_sockaddr,
|
||||
int __user *upeer_addrlen, int flags);
|
||||
int __user *upeer_addrlen, int flags,
|
||||
unsigned long nofile);
|
||||
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
|
||||
int __user *upeer_addrlen, int flags);
|
||||
extern int __sys_socket(int family, int type, int protocol);
|
||||
|
@@ -284,6 +284,7 @@ struct sdw_dpn_audio_mode {
|
||||
* @max_async_buffer: Number of samples that this port can buffer in
|
||||
* asynchronous modes
|
||||
* @block_pack_mode: Type of block port mode supported
|
||||
* @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register
|
||||
* @port_encoding: Payload Channel Sample encoding schemes supported
|
||||
* @audio_modes: Audio modes supported
|
||||
*/
|
||||
@@ -307,6 +308,7 @@ struct sdw_dpn_prop {
|
||||
u32 modes;
|
||||
u32 max_async_buffer;
|
||||
bool block_pack_mode;
|
||||
bool read_only_wordlength;
|
||||
u32 port_encoding;
|
||||
struct sdw_dpn_audio_mode *audio_modes;
|
||||
};
|
||||
|
@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
|
||||
int (*begin)(void);
|
||||
int (*prepare)(void);
|
||||
int (*prepare_late)(void);
|
||||
void (*wake)(void);
|
||||
bool (*wake)(void);
|
||||
void (*restore_early)(void);
|
||||
void (*restore)(void);
|
||||
void (*end)(void);
|
||||
|
@@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
enum dma_sync_target target);
|
||||
|
||||
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
extern enum swiotlb_force swiotlb_force;
|
||||
extern phys_addr_t io_tlb_start, io_tlb_end;
|
||||
@@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
|
||||
return paddr >= io_tlb_start && paddr < io_tlb_end;
|
||||
}
|
||||
|
||||
bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
void __init swiotlb_exit(void);
|
||||
unsigned int swiotlb_max_segment(void);
|
||||
size_t swiotlb_max_mapping_size(struct device *dev);
|
||||
@@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
|
||||
dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline void swiotlb_exit(void)
|
||||
{
|
||||
}
|
||||
|
@@ -12,8 +12,6 @@
|
||||
#include <linux/time64.h>
|
||||
#include <linux/timex.h>
|
||||
|
||||
#define TIME_T_MAX (__kernel_old_time_t)((1UL << ((sizeof(__kernel_old_time_t) << 3) - 1)) - 1)
|
||||
|
||||
typedef s32 old_time32_t;
|
||||
|
||||
struct old_timespec32 {
|
||||
@@ -73,162 +71,12 @@ struct __kernel_timex;
|
||||
int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *);
|
||||
int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *);
|
||||
|
||||
#if __BITS_PER_LONG == 64
|
||||
|
||||
/* timespec64 is defined as timespec here */
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
return *(const struct timespec *)&ts64;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
return *(const struct timespec64 *)&ts;
|
||||
}
|
||||
|
||||
#else
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
struct timespec ret;
|
||||
|
||||
ret.tv_sec = (time_t)ts64.tv_sec;
|
||||
ret.tv_nsec = ts64.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
struct timespec64 ret;
|
||||
|
||||
ret.tv_sec = ts.tv_sec;
|
||||
ret.tv_nsec = ts.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline int timespec_equal(const struct timespec *a,
|
||||
const struct timespec *b)
|
||||
{
|
||||
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
|
||||
}
|
||||
|
||||
/*
|
||||
* lhs < rhs: return <0
|
||||
* lhs == rhs: return 0
|
||||
* lhs > rhs: return >0
|
||||
*/
|
||||
static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_nsec - rhs->tv_nsec;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the timespec is norm, false if denorm:
|
||||
*/
|
||||
static inline bool timespec_valid(const struct timespec *ts)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (ts->tv_sec < 0)
|
||||
return false;
|
||||
/* Can't have more nanoseconds then a second */
|
||||
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* timespec_to_ns - Convert timespec to nanoseconds
|
||||
* @ts: pointer to the timespec variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timespec
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timespec_to_ns(const struct timespec *ts)
|
||||
{
|
||||
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
|
||||
}
|
||||
|
||||
/**
|
||||
* ns_to_timespec - Convert nanoseconds to timespec
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timespec representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timespec ns_to_timespec(const s64 nsec);
|
||||
|
||||
/**
|
||||
* timespec_add_ns - Adds nanoseconds to a timespec
|
||||
* @a: pointer to timespec to be incremented
|
||||
* @ns: unsigned nanoseconds value to be added
|
||||
*
|
||||
* This must always be inlined because its used from the x86-64 vdso,
|
||||
* which cannot call other kernel functions.
|
||||
*/
|
||||
static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
|
||||
{
|
||||
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
|
||||
a->tv_nsec = ns;
|
||||
}
|
||||
|
||||
static inline unsigned long mktime(const unsigned int year,
|
||||
const unsigned int mon, const unsigned int day,
|
||||
const unsigned int hour, const unsigned int min,
|
||||
const unsigned int sec)
|
||||
{
|
||||
return mktime64(year, mon, day, hour, min, sec);
|
||||
}
|
||||
|
||||
static inline bool timeval_valid(const struct timeval *tv)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (tv->tv_sec < 0)
|
||||
return false;
|
||||
|
||||
/* Can't have more microseconds then a second */
|
||||
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* timeval_to_ns - Convert timeval to nanoseconds
|
||||
* @ts: pointer to the timeval variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timeval
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timeval_to_ns(const struct timeval *tv)
|
||||
{
|
||||
return ((s64) tv->tv_sec * NSEC_PER_SEC) +
|
||||
tv->tv_usec * NSEC_PER_USEC;
|
||||
}
|
||||
|
||||
/**
|
||||
* ns_to_timeval - Convert nanoseconds to timeval
|
||||
* ns_to_kernel_old_timeval - Convert nanoseconds to timeval
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timeval representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timeval ns_to_timeval(const s64 nsec);
|
||||
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
|
||||
|
||||
/*
|
||||
* Old names for the 32-bit time_t interfaces, these will be removed
|
||||
* when everything uses the new names.
|
||||
*/
|
||||
#define compat_time_t old_time32_t
|
||||
#define compat_timeval old_timeval32
|
||||
#define compat_timespec old_timespec32
|
||||
#define compat_itimerspec old_itimerspec32
|
||||
#define ns_to_compat_timeval ns_to_old_timeval32
|
||||
#define get_compat_itimerspec64 get_old_itimerspec32
|
||||
#define put_compat_itimerspec64 put_old_itimerspec32
|
||||
#define compat_get_timespec64 get_old_timespec32
|
||||
#define compat_put_timespec64 put_old_timespec32
|
||||
|
||||
#endif
|
||||
|
@@ -11,36 +11,4 @@ static inline unsigned long get_seconds(void)
|
||||
return ktime_get_real_seconds();
|
||||
}
|
||||
|
||||
static inline void getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ktime_get_real_ts64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void ktime_get_ts(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ktime_get_ts64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void getrawmonotonic(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ktime_get_raw_ts64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void getboottime(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getboottime64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -440,7 +440,7 @@ struct synth_event_trace_state {
|
||||
struct synth_event *event;
|
||||
unsigned int cur_field;
|
||||
unsigned int n_u64;
|
||||
bool enabled;
|
||||
bool disabled;
|
||||
bool add_next;
|
||||
bool add_name;
|
||||
};
|
||||
|
@@ -225,6 +225,8 @@ struct tty_port_client_operations {
|
||||
void (*write_wakeup)(struct tty_port *port);
|
||||
};
|
||||
|
||||
extern const struct tty_port_client_operations tty_port_default_client_ops;
|
||||
|
||||
struct tty_port {
|
||||
struct tty_bufhead buf; /* Locked internally */
|
||||
struct tty_struct *tty; /* Back pointer */
|
||||
|
@@ -65,11 +65,6 @@ typedef __kernel_ssize_t ssize_t;
|
||||
typedef __kernel_ptrdiff_t ptrdiff_t;
|
||||
#endif
|
||||
|
||||
#ifndef _TIME_T
|
||||
#define _TIME_T
|
||||
typedef __kernel_old_time_t time_t;
|
||||
#endif
|
||||
|
||||
#ifndef _CLOCK_T
|
||||
#define _CLOCK_T
|
||||
typedef __kernel_clock_t clock_t;
|
||||
|
@@ -69,4 +69,7 @@
|
||||
/* Hub needs extra delay after resetting its port. */
|
||||
#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
|
||||
|
||||
/* device has blacklisted endpoints */
|
||||
#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
|
||||
|
||||
#endif /* __LINUX_USB_QUIRKS_H */
|
||||
|
@@ -141,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
|
||||
|
||||
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
|
||||
unsigned long pgoff);
|
||||
void vmalloc_sync_all(void);
|
||||
|
||||
void vmalloc_sync_mappings(void);
|
||||
void vmalloc_sync_unmappings(void);
|
||||
|
||||
/*
|
||||
* Lowlevel-APIs (not for driver use!)
|
||||
*/
|
||||
|
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
|
||||
*
|
||||
* We queue the work to the CPU on which it was submitted, but if the CPU dies
|
||||
* it can be processed by another CPU.
|
||||
*
|
||||
* Memory-ordering properties: If it returns %true, guarantees that all stores
|
||||
* preceding the call to queue_work() in the program order will be visible from
|
||||
* the CPU which will execute @work by the time such work executes, e.g.,
|
||||
*
|
||||
* { x is initially 0 }
|
||||
*
|
||||
* CPU0 CPU1
|
||||
*
|
||||
* WRITE_ONCE(x, 1); [ @work is being executed ]
|
||||
* r0 = queue_work(wq, work); r1 = READ_ONCE(x);
|
||||
*
|
||||
* Forbids: r0 == true && r1 == 0
|
||||
*/
|
||||
static inline bool queue_work(struct workqueue_struct *wq,
|
||||
struct work_struct *work)
|
||||
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
|
||||
* This puts a job in the kernel-global workqueue if it was not already
|
||||
* queued and leaves it in the same position on the kernel-global
|
||||
* workqueue otherwise.
|
||||
*
|
||||
* Shares the same memory-ordering properties of queue_work(), cf. the
|
||||
* DocBook header of queue_work().
|
||||
*/
|
||||
static inline bool schedule_work(struct work_struct *work)
|
||||
{
|
||||
|
Reference in New Issue
Block a user