Merge tag 'asoc-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Updates for v4.13

The big news with this release is the of-graph card: it provides a
replacement for simple-card that is much more flexible and scalable,
allowing many more systems to use a generic sound card than was
possible before.

 - The of-graph card, finally merged after a long and dedicated effort
   by Morimoto-san.
 - New widget types intended mainly for use with DSPs.
 - New drivers for Allwinner V3s SoCs, Ensonic ES8316, several classes
   of x86 machine, Rockchip PDM controllers, STM32 I2S and S/PDIF
   controllers and ZTE AUD96P22 CODECs.
@@ -426,6 +426,7 @@ extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
                     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

@@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);

bool blk_mq_queue_stopped(struct request_queue *q);

@@ -391,6 +391,8 @@ struct request_queue {
        int nr_rqs[2];          /* # allocated [a]sync rqs */
        int nr_rqs_elvpriv;     /* # allocated rqs w/ elvpriv */

        atomic_t shared_hctx_restart;

        struct blk_queue_stats *stats;
        struct rq_wb *rq_wb;

@@ -586,6 +588,8 @@ struct request_queue {

        size_t cmd_size;
        void *rq_alloc_data;

        struct work_struct release_work;
};

#define QUEUE_FLAG_QUEUED 1     /* uses generic tag queueing */

@@ -3,6 +3,8 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>

#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG

/*
@@ -12,12 +14,10 @@
 */

# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
extern const char *ceph_file_part(const char *s, int len);
# define dout(fmt, ...)                                         \
        pr_debug("%.*s %12.12s:%-4d : " fmt,                    \
                 8 - (int)sizeof(KBUILD_MODNAME), "    ",       \
                 ceph_file_part(__FILE__, sizeof(__FILE__)),    \
                 __LINE__, ##__VA_ARGS__)
                 kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do {                            \

@@ -48,6 +48,7 @@ enum {
        CSS_ONLINE   = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE  = (1 << 3), /* css is visible to userland */
        CSS_DYING    = (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */

@@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
        return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
        return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css

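The kernel-doc above spells out when css_is_dying() matters; a minimal
sketch of that pattern (hypothetical caller, not part of this merge)
could be:

#include <linux/cgroup.h>

/* Hypothetical illustration: refuse new work on a css whose cgroup is
 * already scheduled for removal, even though the RCU-delayed
 * ->css_offline() has not run yet. */
static int example_claim_css(struct cgroup_subsys_state *css)
{
        if (css_is_dying(css))
                return -ENODEV; /* behave as if the cgroup were gone */
        return 0;
}
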
@@ -15,3 +15,11 @@
 * with any version that can compile the kernel
 */
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

/*
 * GCC does not warn about unused static inline functions for
 * -Wunused-function. This turns out to avoid the need for complex #ifdef
 * directives. Suppress the warning in clang as well.
 */
#undef inline
#define inline inline __attribute__((unused)) notrace

@@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item,
                                       const char *name,
                                       struct config_item_type *type);

extern struct config_item * config_item_get(struct config_item *);
extern struct config_item *config_item_get(struct config_item *);
extern struct config_item *config_item_get_unless_zero(struct config_item *);
extern void config_item_put(struct config_item *);

struct config_item_type {

@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

struct iommu_domain;
struct msi_msg;
struct device;

static inline int iommu_dma_init(void)
{

@@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
        void *private_data) { return -1; }
        void *private_data) { return -ENXIO; }
static inline bool dmi_match(enum dmi_field f, const char *str)
        { return false; }
static inline void dmi_memdev_name(u16 handle, const char **bank,

@@ -153,7 +153,7 @@ struct elevator_type
#endif

        /* managed by elevator core */
        char icq_cache_name[ELV_NAME_MAX + 5];  /* elvname + "_io_cq" */
        char icq_cache_name[ELV_NAME_MAX + 6];  /* elvname + "_io_cq" */
        struct list_head list;
};

@@ -272,6 +272,16 @@ struct bpf_prog_aux;
                .off   = OFF,                           \
                .imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)                                  \
        ((struct bpf_insn) {                            \
                .code  = BPF_JMP | BPF_JA,              \
                .dst_reg = 0,                           \
                .src_reg = 0,                           \
                .off   = OFF,                           \
                .imm   = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)                             \

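BPF_JMP_A(OFF) added above encodes an unconditional jump of OFF
instructions; a hedged sketch of hand-assembling with it (illustrative
program, not from this merge):

#include <linux/filter.h>

/* Illustrative only: the jump skips the "return 0" move. */
static const struct bpf_insn example_prog[] = {
        BPF_JMP_A(1),                   /* goto pc + 1 */
        BPF_MOV64_IMM(BPF_REG_0, 0),    /* skipped */
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_EXIT_INSN(),
};
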
@@ -41,7 +41,7 @@ struct vm_area_struct;
#define ___GFP_WRITE            0x800000u
#define ___GFP_KSWAPD_RECLAIM   0x1000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP        0x4000000u
#define ___GFP_NOLOCKDEP        0x2000000u
#else
#define ___GFP_NOLOCKDEP        0
#endif

@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
        .flags = _flags,                                                  \
}

#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#else
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
#endif

#endif /* __LINUX_GPIO_MACHINE_H */

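For context, these calls take tables built with the GPIO_LOOKUP()
helpers from the same header; a hedged board-code sketch (device and
chip names are made up):

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table foo_gpios = {
        .dev_id = "foo-device",
        .table = {
                /* map line 42 of "gpiochip0" to foo-device's "reset" GPIO */
                GPIO_LOOKUP("gpiochip0", 42, "reset", GPIO_ACTIVE_LOW),
                { },
        },
};

static void foo_board_init(void)
{
        /* a no-op stub when CONFIG_GPIOLIB is disabled, per the hunk above */
        gpiod_add_lookup_table(&foo_gpios);
}
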
@@ -167,7 +167,6 @@ static inline void hash_del_rcu(struct hlist_node *node)
/**
 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
 * same bucket in an rcu enabled hashtable
 * in a rcu enabled hashtable
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct

@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
                                                    netdev_features_t features)
{
        if (skb_vlan_tagged_multi(skb))
                features = netdev_intersect_features(features,
                                                     NETIF_F_SG |
                                                     NETIF_F_HIGHDMA |
                                                     NETIF_F_FRAGLIST |
                                                     NETIF_F_HW_CSUM |
                                                     NETIF_F_HW_VLAN_CTAG_TX |
                                                     NETIF_F_HW_VLAN_STAG_TX);
        if (skb_vlan_tagged_multi(skb)) {
                /* In the case of multi-tagged packets, use a direct mask
                 * instead of using netdev_interesect_features(), to make
                 * sure that only devices supporting NETIF_F_HW_CSUM will
                 * have checksum offloading support.
                 */
                features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
                            NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
                            NETIF_F_HW_VLAN_STAG_TX;
        }

        return features;
}

@@ -417,6 +417,10 @@
#define ICH_HCR_EN              (1 << 0)
#define ICH_HCR_UIE             (1 << 1)

#define ICH_VMCR_ACK_CTL_SHIFT  2
#define ICH_VMCR_ACK_CTL_MASK   (1 << ICH_VMCR_ACK_CTL_SHIFT)
#define ICH_VMCR_FIQ_EN_SHIFT   3
#define ICH_VMCR_FIQ_EN_MASK    (1 << ICH_VMCR_FIQ_EN_SHIFT)
#define ICH_VMCR_CBPR_SHIFT     4
#define ICH_VMCR_CBPR_MASK      (1 << ICH_VMCR_CBPR_SHIFT)
#define ICH_VMCR_EOIM_SHIFT     9

@@ -25,7 +25,18 @@
#define GICC_ENABLE                     0x1
#define GICC_INT_PRI_THRESHOLD          0xf0

#define GIC_CPU_CTRL_EOImodeNS          (1 << 9)
#define GIC_CPU_CTRL_EnableGrp0_SHIFT   0
#define GIC_CPU_CTRL_EnableGrp0         (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
#define GIC_CPU_CTRL_EnableGrp1_SHIFT   1
#define GIC_CPU_CTRL_EnableGrp1         (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
#define GIC_CPU_CTRL_AckCtl_SHIFT       2
#define GIC_CPU_CTRL_AckCtl             (1 << GIC_CPU_CTRL_AckCtl_SHIFT)
#define GIC_CPU_CTRL_FIQEn_SHIFT        3
#define GIC_CPU_CTRL_FIQEn              (1 << GIC_CPU_CTRL_FIQEn_SHIFT)
#define GIC_CPU_CTRL_CBPR_SHIFT         4
#define GIC_CPU_CTRL_CBPR               (1 << GIC_CPU_CTRL_CBPR_SHIFT)
#define GIC_CPU_CTRL_EOImodeNS_SHIFT    9
#define GIC_CPU_CTRL_EOImodeNS          (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)

#define GICC_IAR_INT_ID_MASK            0x3ff
#define GICC_INT_SPURIOUS               1023
@@ -84,8 +95,19 @@
#define GICH_LR_EOI                     (1 << 19)
#define GICH_LR_HW                      (1 << 31)

#define GICH_VMCR_CTRL_SHIFT            0
#define GICH_VMCR_CTRL_MASK             (0x21f << GICH_VMCR_CTRL_SHIFT)
#define GICH_VMCR_ENABLE_GRP0_SHIFT     0
#define GICH_VMCR_ENABLE_GRP0_MASK      (1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
#define GICH_VMCR_ENABLE_GRP1_SHIFT     1
#define GICH_VMCR_ENABLE_GRP1_MASK      (1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
#define GICH_VMCR_ACK_CTL_SHIFT         2
#define GICH_VMCR_ACK_CTL_MASK          (1 << GICH_VMCR_ACK_CTL_SHIFT)
#define GICH_VMCR_FIQ_EN_SHIFT          3
#define GICH_VMCR_FIQ_EN_MASK           (1 << GICH_VMCR_FIQ_EN_SHIFT)
#define GICH_VMCR_CBPR_SHIFT            4
#define GICH_VMCR_CBPR_MASK             (1 << GICH_VMCR_CBPR_SHIFT)
#define GICH_VMCR_EOI_MODE_SHIFT        9
#define GICH_VMCR_EOI_MODE_MASK         (1 << GICH_VMCR_EOI_MODE_SHIFT)

#define GICH_VMCR_PRIMASK_SHIFT         27
#define GICH_VMCR_PRIMASK_MASK          (0x1f << GICH_VMCR_PRIMASK_SHIFT)
#define GICH_VMCR_BINPOINT_SHIFT        21

@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)

#ifndef __jiffy_arch_data
#define __jiffy_arch_data
#endif

/*
 * The 64-bit value is not atomic - you MUST NOT read it
 * without sampling the sequence number in jiffies_lock.
 * get_jiffies_64() will do this for you as appropriate.
 */
extern u64 __cacheline_aligned_in_smp jiffies_64;
extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);

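As the comment above warns, jiffies_64 must not be read directly on
32-bit kernels; a minimal sketch of the sanctioned access:

#include <linux/jiffies.h>

/* Illustrative only: get_jiffies_64() samples jiffies_lock internally,
 * so the 64-bit read cannot tear when BITS_PER_LONG < 64. */
static u64 example_uptime_ticks(void)
{
        return get_jiffies_64();
}
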
@@ -173,7 +173,6 @@ struct key {
#ifdef KEY_DEBUGGING
        unsigned magic;
#define KEY_DEBUG_MAGIC         0x18273645u
#define KEY_DEBUG_MAGIC_X       0xf8e9dacbu
#endif

        unsigned long flags;    /* status flags (change with bitops) */

@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
                phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
                phys_addr_t end_addr)
{
        return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
        u16 rate_val;
};

struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);

@@ -787,8 +787,14 @@ enum {
};

enum {
        CQE_RSS_HTYPE_IP        = 0x3 << 6,
        CQE_RSS_HTYPE_L4        = 0x3 << 2,
        CQE_RSS_HTYPE_IP        = 0x3 << 2,
        /* cqe->rss_hash_type[3:2] - IP destination selected for hash
         * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
         */
        CQE_RSS_HTYPE_L4        = 0x3 << 6,
        /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
         * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI
         */
};

enum {

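The comments above give the corrected bit positions; a hedged sketch
decoding them from a CQE's rss_hash_type byte (helper name is
illustrative):

#include <linux/mlx5/device.h>

static void example_decode_rss(u8 rss_hash_type, u8 *ip_sel, u8 *l4_sel)
{
        *ip_sel = (rss_hash_type & CQE_RSS_HTYPE_IP) >> 2; /* bits [3:2] */
        *l4_sel = (rss_hash_type & CQE_RSS_HTYPE_L4) >> 6; /* bits [7:6] */
}
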
@@ -787,7 +787,12 @@ enum {

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
        MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
        unsigned long state;
        struct mlx5_cmd_msg *in;
        struct mlx5_cmd_msg *out;
        void *uout;
@@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,

@@ -766,6 +766,12 @@ enum {
        MLX5_CAP_PORT_TYPE_ETH = 0x1,
};

enum {
        MLX5_CAP_UMR_FENCE_STRONG       = 0x0,
        MLX5_CAP_UMR_FENCE_SMALL        = 0x1,
        MLX5_CAP_UMR_FENCE_NONE         = 0x2,
};

struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_0[0x80];

@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_202[0x1];
        u8         ipoib_enhanced_offloads[0x1];
        u8         ipoib_basic_offloads[0x1];
        u8         reserved_at_205[0xa];
        u8         reserved_at_205[0x5];
        u8         umr_fence[0x2];
        u8         reserved_at_20c[0x3];
        u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];

@@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
        return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
        return !vma->vm_ops;
@@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma);
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

static inline int stack_guard_page_start(struct vm_area_struct *vma,
                                         unsigned long addr)
{
        return (vma->vm_flags & VM_GROWSDOWN) &&
                (vma->vm_start == addr) &&
                !vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
        return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        return (vma->vm_flags & VM_GROWSUP) &&
                (vma->vm_end == addr) &&
                !vma_growsup(vma->vm_next, addr);
}

int vma_is_stack_for_current(struct vm_area_struct *vma);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping,
                        pgoff_t offset,
                        unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

@@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
        return vma;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
        unsigned long vm_start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
                vm_start -= stack_guard_gap;
                if (vm_start > vma->vm_start)
                        vm_start = 0;
        }
        return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
        unsigned long vm_end = vma->vm_end;

        if (vma->vm_flags & VM_GROWSUP) {
                vm_end += stack_guard_gap;
                if (vm_end < vma->vm_end)
                        vm_end = -PAGE_SIZE;
        }
        return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

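vm_start_gap()/vm_end_gap() above extend a VMA by stack_guard_gap on
its growable side, clamping on wraparound; a hedged sketch of how a
free-range search might consult them (illustrative helper, not from
this merge):

#include <linux/mm.h>

/* Illustrative only: a candidate range conflicts with a VMA if it
 * intrudes into the VMA itself or its guard gap. */
static bool example_range_conflicts(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
{
        return start < vm_end_gap(vma) && end > vm_start_gap(vma);
}
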
@@ -2327,6 +2324,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */
#define FOLL_COW        0x4000  /* internal GUP flag */

static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
        if (vm_fault & VM_FAULT_OOM)
                return -ENOMEM;
        if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
        if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                return -EFAULT;
        return 0;
}

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,

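vm_fault_to_errno() added above folds the VM_FAULT_* bits into an
errno; a hedged sketch of a GUP-style caller (illustrative, not from
this merge):

#include <linux/mm.h>

static int example_fault_in(struct vm_area_struct *vma, unsigned long address,
                            unsigned int fault_flags, int foll_flags)
{
        int ret = handle_mm_fault(vma, address, fault_flags);

        if (ret & VM_FAULT_ERROR)
                return vm_fault_to_errno(ret, foll_flags);
        return 0;
}
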
@@ -678,6 +678,7 @@ typedef struct pglist_data {
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
        unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

@@ -467,6 +467,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
        DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
        DMI_BOARD_VERSION,

@@ -457,7 +457,7 @@ enum hwparam_type {
        hwparam_ioport,          /* Module parameter configures an I/O port */
        hwparam_iomem,           /* Module parameter configures an I/O mem address */
        hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */
        hwparam_irq,             /* Module parameter configures an I/O port */
        hwparam_irq,             /* Module parameter configures an IRQ */
        hwparam_dma,             /* Module parameter configures a DMA channel */
        hwparam_dma_addr,        /* Module parameter configures a DMA buffer address */
        hwparam_other,           /* Module parameter configures some other value */

@@ -914,8 +914,7 @@ struct xfrmdev_ops {
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *      Called when a user wants to change the Maximum Transfer Unit
 *      of a device. If not defined, any request to change MTU will
 *      will return an error.
 *      of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *      Callback used when the transmitter has not made any progress
@@ -1596,8 +1595,8 @@ enum netdev_priv_flags {
 *      @rtnl_link_state:       This enum represents the phases of creating
 *                              a new link
 *
 *      @destructor:            Called from unregister,
 *                              can be used to call free_netdev
 *      @needs_free_netdev:     Should unregister perform free_netdev?
 *      @priv_destructor:       Called from unregister
 *      @npinfo:        XXX: need comments on this one
 *      @nd_net:                Network namespace this network device is inside
 *
@@ -1858,7 +1857,8 @@ struct net_device {
                RTNL_LINK_INITIALIZING,
        } rtnl_link_state:16;

        void (*destructor)(struct net_device *dev);
        bool needs_free_netdev;
        void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
        struct netpoll_info __rcu *npinfo;
@@ -4261,6 +4261,11 @@ static inline const char *netdev_name(const struct net_device *dev)
        return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
        return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
        switch (dev->reg_state) {

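With the fields added above, drivers that used to set
dev->destructor = free_netdev now set a flag instead; a hedged setup
sketch (driver specifics are illustrative):

#include <linux/etherdevice.h>

static void example_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->needs_free_netdev = true;  /* core calls free_netdev() after unregister */
        dev->priv_destructor = NULL;    /* no extra private teardown needed here */
}
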
@@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m,
int xt_target_to_user(const struct xt_entry_target *t,
                      struct xt_entry_target __user *u);
int xt_data_to_user(void __user *dst, const void *src,
                    int usersize, int size);
                    int usersize, int size, int aligned_size);

void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                 struct xt_counters_info *info, bool compat);

@@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
/* True if the target is not a standard target */
#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)

static inline bool ebt_invalid_target(int target)
{
        return (target < -NUM_STANDARD_TARGETS || target >= 0);
}

#endif

@@ -43,11 +43,15 @@ struct of_endpoint {
#ifdef CONFIG_OF
int of_graph_parse_endpoint(const struct device_node *node,
                            struct of_endpoint *endpoint);
int of_graph_get_endpoint_count(const struct device_node *np);
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
                                        struct device_node *previous);
struct device_node *of_graph_get_endpoint_by_regs(
                const struct device_node *parent, int port_reg, int reg);
struct device_node *of_graph_get_remote_endpoint(
                const struct device_node *node);
struct device_node *of_graph_get_port_parent(struct device_node *node);
struct device_node *of_graph_get_remote_port_parent(
                const struct device_node *node);
struct device_node *of_graph_get_remote_port(const struct device_node *node);
@@ -61,6 +65,11 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
        return -ENOSYS;
}

static inline int of_graph_get_endpoint_count(const struct device_node *np)
{
        return 0;
}

static inline struct device_node *of_graph_get_port_by_id(
        struct device_node *node, u32 id)
{
@@ -80,6 +89,18 @@ static inline struct device_node *of_graph_get_endpoint_by_regs(
        return NULL;
}

static inline struct device_node *of_graph_get_remote_endpoint(
        const struct device_node *node)
{
        return NULL;
}

static inline struct device_node *of_graph_get_port_parent(
        struct device_node *node)
{
        return NULL;
}

static inline struct device_node *of_graph_get_remote_port_parent(
        const struct device_node *node)
{

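These accessors back the of-graph card mentioned in the merge
description; a hedged sketch of walking a node's endpoints with them
(illustrative function, not from this merge):

#include <linux/of.h>
#include <linux/of_graph.h>

static void example_walk_endpoints(struct device_node *node)
{
        struct device_node *ep = NULL;

        /* of_graph_get_next_endpoint() drops the previous reference */
        while ((ep = of_graph_get_next_endpoint(node, ep))) {
                struct device_node *remote = of_graph_get_remote_port_parent(ep);

                /* ... match "remote" against known CPUs/codecs ... */
                of_node_put(remote);
        }
}
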
@@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np,
                                                const char *bus_id,
                                                struct device *parent);

extern int of_platform_device_destroy(struct device *dev, void *data);
extern int of_platform_bus_probe(struct device_node *root,
                                 const struct of_device_id *matches,
                                 struct device *parent);

@@ -183,6 +183,11 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
        /* Do not use FLR even if device advertises PCI_AF_CAP */
        PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
        /*
         * Resume before calling the driver's system suspend hooks, disabling
         * the direct_complete optimization.
         */
        PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
};

enum pci_irq_reroute_variant {
@@ -1342,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
                                              unsigned int max_vecs, unsigned int flags,
                                              const struct irq_affinity *aff_desc)
{
        if (min_vecs > 1)
                return -EINVAL;
        return 1;
        if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
                return 1;
        return -ENOSPC;
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)

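The reworked stub above now succeeds only when a legacy IRQ is actually
usable; a hedged caller sketch (illustrative driver code):

#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev)
{
        int nvecs = pci_alloc_irq_vectors(pdev, 1, 4,
                        PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX);

        if (nvecs < 0)
                return nvecs;   /* -ENOSPC from the stub when dev->irq is 0 */
        /* ... request_irq(pci_irq_vector(pdev, 0), ...) ... */
        return 0;
}
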
@@ -42,8 +42,6 @@
 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
 *      impedance to VDD). If the argument is != 0 pull-up is enabled,
 *      if it is 0, pull-up is total, i.e. the pin is connected to VDD.
 * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
 *      input and output operations.
 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
 *      collector) which means it is usually wired with other output ports
 *      which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
        PIN_CONFIG_BIAS_PULL_DOWN,
        PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
        PIN_CONFIG_BIAS_PULL_UP,
        PIN_CONFIG_BIDIRECTIONAL,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
        PIN_CONFIG_DRIVE_PUSH_PULL,

@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
                          struct task_struct *new_parent,
                          const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ        0x01
@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)

        if (unlikely(ptrace) && current->ptrace) {
                child->ptrace = current->ptrace;
                __ptrace_link(child, current->parent);
                __ptrace_link(child, current->parent, current->ptracer_cred);

                if (child->ptrace & PT_SEIZED)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)

                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
        else
                child->ptracer_cred = NULL;
}

/**

@@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);

int dquot_initialize(struct inode *inode);
bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode)
        return 0;
}

static inline bool dquot_initialize_needed(struct inode *inode)
{
        return false;
}

static inline void dquot_drop(struct inode *inode)
{
}

@@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
@@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
        return 0;
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
static inline int serdev_device_write_buf(struct serdev_device *serdev,
                                          const unsigned char *buf,
                                          size_t count)
{
        return -ENODEV;
}
static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
{
@@ -301,7 +308,7 @@ struct tty_driver;
struct device *serdev_tty_port_register(struct tty_port *port,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx);
void serdev_tty_port_unregister(struct tty_port *port);
int serdev_tty_port_unregister(struct tty_port *port);
#else
static inline struct device *serdev_tty_port_register(struct tty_port *port,
                                                      struct device *parent,
@@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
{
        return ERR_PTR(-ENODEV);
}
static inline void serdev_tty_port_unregister(struct tty_port *port) {}
static inline int serdev_tty_port_unregister(struct tty_port *port)
{
        return -ENODEV;
}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */

static inline int serdev_device_write_buf(struct serdev_device *serdev,
                                          const unsigned char *data,
                                          size_t count)
{
        return serdev_device_write(serdev, data, count, 0);
}

#endif /*_LINUX_SERDEV_H */

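serdev_device_write_buf() becomes a real declared call here rather than
an inline wrapper; a hedged client sketch (command string illustrative):

#include <linux/serdev.h>

static int example_send_cmd(struct serdev_device *serdev)
{
        static const unsigned char cmd[] = "AT\r";

        /* returns -ENODEV via the stub when serdev support is disabled */
        return serdev_device_write_buf(serdev, cmd, sizeof(cmd) - 1);
}
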
@@ -84,6 +84,7 @@ struct kmem_cache {
        int red_left_pad;       /* Left redzone padding size */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
        struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;

@@ -172,9 +172,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
        int retval;

        preempt_disable();
        retval = __srcu_read_lock(sp);
        preempt_enable();
        rcu_lock_acquire(&(sp)->dep_map);
        return retval;
}

@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
        char *cp = (char *)p;
        struct kvec *vec = &rqstp->rq_arg.head[0];
        return cp == (char *)vec->iov_base + vec->iov_len;
        return cp >= (char*)vec->iov_base
                && cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int

@@ -189,8 +189,6 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
        int (*begin)(void);
        int (*prepare)(void);
        void (*wake)(void);
        void (*sync)(void);
        void (*restore)(void);
        void (*end)(void);
};
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;

extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
extern void pm_wakeup_clear(bool reset);
extern void pm_wakeup_clear(void);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)

static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_wakeup_clear(void) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}

static inline void lock_system_sleep(void) {}

@@ -29,7 +29,6 @@
 */
struct tk_read_base {
        struct clocksource      *clock;
        u64                     (*read)(struct clocksource *cs);
        u64                     mask;
        u64                     cycle_last;
        u32                     mult;
@@ -58,7 +57,7 @@ struct tk_read_base {
 *                      interval.
 * @xtime_remainder:    Shifted nano seconds left over when rounding
 *                      @cycle_interval
 * @raw_interval:       Raw nano seconds accumulated per NTP interval.
 * @raw_interval:       Shifted raw nano seconds accumulated per NTP interval.
 * @ntp_error:          Difference between accumulated time and NTP time in ntp
 *                      shifted nano seconds.
 * @ntp_error_shift:    Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@ struct timekeeper {
        u64                     cycle_interval;
        u64                     xtime_interval;
        s64                     xtime_remainder;
        u32                     raw_interval;
        u64                     raw_interval;
        /* The ntp_tick_length() value currently being used.
         * This cached copy ensures we consistently apply the tick
         * length for an entire tick, as ntp_tick_length may change

@@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device, void *drvdata,
                const struct attribute_group **attr_grp);
extern struct device *tty_port_register_device_serdev(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device);
extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device, void *drvdata,
                const struct attribute_group **attr_grp);
extern void tty_port_unregister_device(struct tty_port *port,
                struct tty_driver *driver, unsigned index);
extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_destroy(struct tty_port *port);

@@ -206,6 +206,7 @@ struct cdc_state {
};

extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_status(struct usbnet *, struct urb *);