Merge tag 'kvm-arm-for-v4.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
KVM/ARM Fixes for v4.4-rc3. Includes some timer fixes, properly unmapping PTEs, an errata fix, and two tweaks to the EL2 panic code.
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);
 
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
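The two added declarations export the block queue usage-reference helpers. A minimal sketch of how a caller might pair them; the function name and error handling below are illustrative, not taken from this merge:

#include <linux/blkdev.h>

static int example_queue_work(struct request_queue *q)
{
	int ret;

	/* take a usage reference; fails once the queue is marked dying */
	ret = blk_queue_enter(q, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... issue work against the queue here ... */

	blk_queue_exit(q);	/* drop the reference taken above */
	return 0;
}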
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
 
+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
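The new configfs helpers allow registering child groups at runtime; configfs_register_default_group() returns the new group or an ERR_PTR value. A hedged sketch of typical registration, with made-up names (example_register, my_subsys, my_child_type, "defaults"):

#include <linux/configfs.h>
#include <linux/err.h>

static struct config_group *my_default_group;	/* illustrative */

static int example_register(struct configfs_subsystem *my_subsys,
			    struct config_item_type *my_child_type)
{
	my_default_group = configfs_register_default_group(&my_subsys->su_group,
							   "defaults",
							   my_child_type);
	if (IS_ERR(my_default_group))
		return PTR_ERR(my_default_group);
	return 0;
}

static void example_unregister(void)
{
	configfs_unregister_default_group(my_default_group);
}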
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 
 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-	return gfp_flags & __GFP_DIRECT_RECLAIM;
+	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
 
 #ifdef CONFIG_HIGHMEM
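The body now carries an explicit bool cast with a __force annotation (a sparse-friendly conversion of the __bitwise gfp_t); callers are unchanged. A minimal sketch of the usual use, deciding whether an allocation path may sleep (the wrapper function is illustrative):

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>

static void *example_alloc(size_t size, gfp_t gfp)
{
	/* __GFP_DIRECT_RECLAIM set means the caller may block */
	if (gfpflags_allow_blocking(gfp))
		might_sleep();

	return kmalloc(size, gfp);
}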
@@ -16,6 +16,7 @@
 #define MARVELL_PHY_ID_88E1318S		0x01410e90
 #define MARVELL_PHY_ID_88E1116R		0x01410e40
 #define MARVELL_PHY_ID_88E1510		0x01410dd0
+#define MARVELL_PHY_ID_88E1540		0x01410eb0
 #define MARVELL_PHY_ID_88E3016		0x01410e60
 
 /* struct phy_device dev_flags definitions */
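A new PHY ID constant for the 88E1540. For context only, a hedged sketch of how a phy_driver entry typically matches such an ID against MARVELL_PHY_ID_MASK; the callbacks of the real driver are omitted:

#include <linux/marvell_phy.h>
#include <linux/phy.h>

static struct phy_driver example_88e1540 = {
	.phy_id		= MARVELL_PHY_ID_88E1540,
	.phy_id_mask	= MARVELL_PHY_ID_MASK,
	.name		= "Marvell 88E1540 (example)",
	/* .probe, .config_init, ... provided by the real driver */
};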
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
	u8         lro_cap[0x1];
	u8         lro_psh_flag[0x1];
	u8         lro_time_stamp[0x1];
-	u8         reserved_0[0x6];
+	u8         reserved_0[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_1[0x2];
	u8         max_lso_cap[0x5];
-	u8         reserved_1[0x4];
+	u8         reserved_2[0x4];
	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_2[0x3];
+	u8         reserved_3[0x3];
	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_3[0x2];
+	u8         reserved_4[0x2];
	u8         tunnel_statless_gre[0x1];
	u8         tunnel_stateless_vxlan[0x1];
 
-	u8         reserved_4[0x20];
+	u8         reserved_5[0x20];
 
-	u8         reserved_5[0x10];
+	u8         reserved_6[0x10];
	u8         lro_min_mss_size[0x10];
 
-	u8         reserved_6[0x120];
+	u8         reserved_7[0x120];
 
	u8         lro_timer_supported_periods[4][0x20];
 
-	u8         reserved_7[0x600];
+	u8         reserved_8[0x600];
 };
 
 struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
 };
 
 struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8         reserved[0x20];
+	u8         reserved_0[0x20];
 
-	u8         reserved1[0x1f];
+	u8         reserved_1[0x1b];
+	u8         self_lb_en[0x1];
+	u8         reserved_2[0x3];
	u8         lro[0x1];
 };
 
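These mlx5_ifc structures describe firmware command and capability layouts bit by bit; drivers do not dereference them directly but go through the MLX5_CAP_*/MLX5_GET/MLX5_SET accessors. A hedged sketch of reading the capability bit added above (the helper name is illustrative):

#include <linux/mlx5/driver.h>

static bool example_can_toggle_self_loopback(struct mlx5_core_dev *mdev)
{
	/* new capability: may the self-loopback setting be modified? */
	return MLX5_CAP_ETH(mdev, self_lb_en_modifiable);
}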
@@ -2068,20 +2068,23 @@ struct pcpu_sw_netstats {
	struct u64_stats_sync   syncp;
 };
 
-#define netdev_alloc_pcpu_stats(type)				\
-({								\
-	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
-	if (pcpu_stats)	{					\
-		int __cpu;					\
-		for_each_possible_cpu(__cpu) {			\
-			typeof(type) *stat;			\
-			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
-			u64_stats_init(&stat->syncp);		\
-		}						\
-	}							\
-	pcpu_stats;						\
+#define __netdev_alloc_pcpu_stats(type, gfp)			\
+({								\
+	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
+	if (pcpu_stats)	{					\
+		int __cpu;					\
+		for_each_possible_cpu(__cpu) {			\
+			typeof(type) *stat;			\
+			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
+			u64_stats_init(&stat->syncp);		\
+		}						\
+	}							\
+	pcpu_stats;						\
 })
 
+#define netdev_alloc_pcpu_stats(type)				\
+	__netdev_alloc_pcpu_stats(type, GFP_KERNEL);
+
 #include <linux/notifier.h>
 
 /* netdevice notifier chain. Please remember to update the rtnetlink
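The per-CPU stats allocator is split so that callers in atomic context can pass their own GFP flags, while the old macro keeps its GFP_KERNEL behaviour. A minimal, illustrative sketch:

#include <linux/netdevice.h>

static struct pcpu_sw_netstats __percpu *example_alloc_stats(bool in_atomic)
{
	/* new variant: caller-supplied flags for atomic contexts */
	if (in_atomic)
		return __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats,
						 GFP_ATOMIC);

	/* old name: unchanged, GFP_KERNEL under the hood */
	return netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
}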
@@ -3854,6 +3857,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
	return dev->priv_flags & IFF_EBRIDGE;
 }
 
+static inline bool netif_is_bridge_port(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_BRIDGE_PORT;
+}
+
 static inline bool netif_is_ovs_master(const struct net_device *dev)
 {
	return dev->priv_flags & IFF_OPENVSWITCH;
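netif_is_bridge_port() complements the existing netif_is_bridge_master() test. A hedged sketch of a netdevice notifier that only reacts to bridge ports; the handler body is illustrative:

#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!netif_is_bridge_port(dev))
		return NOTIFY_DONE;

	/* ... handle events for devices enslaved to a bridge ... */
	return NOTIFY_OK;
}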
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
-			      size_t len);
+			      size_t len, size_t align);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
				 struct ip_set_ext *ext);
 
@@ -5,10 +5,13 @@
 #include <linux/netdevice.h>
 
 #ifdef CONFIG_NETFILTER_INGRESS
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
 {
-	return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
-				   NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+#ifdef HAVE_JUMP_LABEL
+	if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+		return false;
+#endif
+	return !list_empty(&skb->dev->nf_hooks_ingress);
 }
 
 static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
	struct nf_hook_state state;
 
	nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
-			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
-			   skb->dev, NULL, dev_net(skb->dev), NULL);
+			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
+			   skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
	return nf_hook_slow(skb, &state);
 }
 
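nf_hook_ingress_active() becomes a cheap, static-key guarded test, so the receive path can skip the hook walk entirely when nothing is registered. A sketch of the intended calling pattern, illustrative rather than the exact core code:

#include <linux/netfilter_ingress.h>

static int example_do_ingress(struct sk_buff *skb)
{
	if (!nf_hook_ingress_active(skb))
		return 0;	/* no ingress hooks on this device */

	return nf_hook_ingress(skb);	/* verdict from the registered hooks */
}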
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
						     const char *name)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
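With the stub (used when OF DMA support is not built in) returning ERR_PTR(-ENODEV) instead of NULL, callers can use a single IS_ERR() check regardless of configuration. An illustrative caller; the channel name "tx" is made up:

#include <linux/of_dma.h>
#include <linux/err.h>

static struct dma_chan *example_get_tx_chan(struct device_node *np)
{
	struct dma_chan *chan = of_dma_request_slave_channel(np, "tx");

	if (IS_ERR(chan))
		return NULL;	/* or propagate PTR_ERR(chan) to the caller */

	return chan;
}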
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
 
 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
@@ -157,6 +157,24 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
 /*
  * Kmalloc array related definitions
  */
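The new __assume_*_alignment markers attach the compiler's assume_aligned attribute to the allocator prototypes, so the optimizer may rely on the minimum alignment the allocators already guarantee. An illustrative consequence for callers; no change is required on their side:

#include <linux/slab.h>

static u64 *example_u64_buf(size_t n)
{
	/*
	 * With __kmalloc marked __assume_kmalloc_alignment the compiler may
	 * assume this pointer is at least ARCH_KMALLOC_MINALIGN aligned and
	 * drop manual alignment checks or fixups around it.
	 */
	return kmalloc_array(n, sizeof(u64), GFP_KERNEL);
}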
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
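kmem_cache_alloc_bulk() now returns an int instead of a bool, so callers test the number of objects obtained rather than a true/false result. A hedged sketch; the all-or-nothing check below is an assumption, not taken from this diff:

#include <linux/slab.h>
#include <linux/errno.h>

static int example_bulk_fill(struct kmem_cache *cache, void **objs, size_t nr)
{
	if (kmem_cache_alloc_bulk(cache, GFP_KERNEL, nr, objs) != nr)
		return -ENOMEM;	/* assumed: less than nr means failure */

	/* ... use objs[0..nr-1] ... */
	kmem_cache_free_bulk(cache, nr, objs);
	return 0;
}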
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
-					 int node, size_t size);
+					 int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
	return __kmalloc_node(size, flags, node);
 }
 
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
 
 /* tty_audit.c */
 #ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
			       size_t size, unsigned icanon);
 extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
 extern int tty_audit_push_current(void);
 #else
-static inline void tty_audit_add_data(struct tty_struct *tty,
-		unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+				      size_t size, unsigned icanon)
 {
 }
 static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)