Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	net/sched/act_police.c
	net/sched/sch_drr.c
	net/sched/sch_hfsc.c
	net/sched/sch_prio.c
	net/sched/sch_red.c
	net/sched/sch_tbf.c

In net-next the drop methods of the packet schedulers got removed,
so the bug fixes to them in 'net' are irrelevant.

A packet action unload crash fix conflicts with the addition of the
new firstuse timestamp.

Signed-off-by: David S. Miller <davem@davemloft.net>
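
As a stand-alone illustration of the "firstuse" timestamp mentioned above (plain C, not the kernel code itself; all names here are illustrative only): the idea is simply to record when something was hit for the first time, once, while the last-use time keeps being refreshed on every hit.

	/* Hypothetical user-space sketch of first-use/last-use bookkeeping. */
	#include <stdio.h>
	#include <time.h>

	struct hit_stats {
		time_t firstuse;	/* stays 0 until the first hit */
		time_t lastuse;		/* refreshed on every hit */
	};

	static void record_hit(struct hit_stats *s)
	{
		time_t now = time(NULL);

		if (!s->firstuse)
			s->firstuse = now;	/* set exactly once */
		s->lastuse = now;
	}

	int main(void)
	{
		struct hit_stats s = { 0, 0 };

		record_hit(&s);
		printf("first=%ld last=%ld\n", (long)s.firstuse, (long)s.lastuse);
		return 0;
	}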

@@ -65,6 +65,7 @@ struct coredump_params {
	unsigned long limit;
	unsigned long mm_flags;
	loff_t written;
	loff_t pos;
};

/*

@@ -279,6 +279,11 @@ struct ceph_osd_client {
	struct workqueue_struct *notify_wq;
};

static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
{
	return osdc->osdmap->flags & flag;
}

extern int ceph_osdc_setup(void);
extern void ceph_osdc_cleanup(void);

@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
	return !ceph_osd_is_up(map, osd);
}

static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
{
	return map && (map->flags & flag);
}

extern char *ceph_osdmap_state_str(char *str, int len, int state);
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);

@@ -25,7 +25,7 @@
#define CLK_SET_PARENT_GATE	BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT	BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED	BIT(3) /* do not gate even if unused */
#define CLK_IS_ROOT		BIT(4) /* Deprecated: Don't use */
				/* unused */
#define CLK_IS_BASIC		BIT(5) /* Basic clk, can't do a to_clk_foo() */
#define CLK_GET_RATE_NOCACHE	BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */

@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif

#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)

@@ -15,13 +15,12 @@

#include <linux/errno.h>

struct pts_fs_info;

#ifdef CONFIG_UNIX98_PTYS

/* Look up a pts fs info and get a ref to it */
struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
void devpts_put_ref(struct pts_fs_info *);
struct pts_fs_info;

struct pts_fs_info *devpts_acquire(struct file *);
void devpts_release(struct pts_fs_info *);

int devpts_new_index(struct pts_fs_info *);
void devpts_kill_index(struct pts_fs_info *, int);

@@ -112,19 +112,24 @@ struct dma_buf_ops {
 * @file: file pointer used for sharing buffers across, and for refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	/* mutex to serialize list manipulation, attach/detach and vmap/unmap */
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {

/**
 * helper macro for exporters; zeros and fills in most common values
 *
 * @name: export-info name
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(a)	\
	struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
		.owner = THIS_MODULE }

/**

@@ -49,6 +49,8 @@ struct fence_cb;
 * @timestamp: Timestamp when the fence was signaled.
 * @status: Optional, only valid if < 0, must be set before calling
 * fence_signal, indicates that the fence has completed with an error.
 * @child_list: list of children fences
 * @active_list: list of active fences
 *
 * the flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most

@@ -241,7 +241,7 @@ struct fscache_cache_ops {

	/* check the consistency between the backing cache and the FS-Cache
	 * cookie */
	bool (*check_consistency)(struct fscache_operation *op);
	int (*check_consistency)(struct fscache_operation *op);

	/* store the updated auxiliary data on an object */
	void (*update_object)(struct fscache_object *object);

@@ -305,12 +305,12 @@
#define ICC_SGI1R_AFFINITY_1_SHIFT	16
#define ICC_SGI1R_AFFINITY_1_MASK	(0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_SGI_ID_SHIFT		24
#define ICC_SGI1R_SGI_ID_MASK		(0xff << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_SGI_ID_MASK		(0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_AFFINITY_2_SHIFT	32
#define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT	40
#define ICC_SGI1R_AFFINITY_3_SHIFT	48
#define ICC_SGI1R_AFFINITY_3_MASK	(0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_AFFINITY_3_MASK	(0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)

#include <asm/arch_gicv3.h>
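
Two different problems are fixed in the SGI register masks above: the AFFINITY_2 and AFFINITY_3 masks were shifted by the AFFINITY_1 amount (a copy-paste slip), and the SGI ID mask was built from a plain int constant (it also narrows to 0xf, i.e. a 4-bit field). The stand-alone C snippet below only demonstrates the integer-width half of it: without a 64-bit suffix the shift is done in signed int, so the result can sign-extend when widened, and a shift by 32 or 48 would not fit in a 32-bit int at all. The output shown for the "wrong" case is what typical compilers produce; strictly speaking it is undefined behaviour.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t wrong = 0xff << 24;	/* int arithmetic, then widened (sign-extends) */
		uint64_t right = 0xffULL << 24;	/* 64-bit arithmetic from the start */

		printf("wrong = %016llx\n", (unsigned long long)wrong);	/* typically ffffffffff000000 */
		printf("right = %016llx\n", (unsigned long long)right);	/* 00000000ff000000 */
		return 0;
	}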

@@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
	u8 rsvd[8];
};

#define MLX5_CMD_OP_MAX 0x920

enum {
	VPORT_STATE_DOWN = 0x0,
	VPORT_STATE_UP = 0x1,
@@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

@@ -205,7 +205,8 @@ enum {
	MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
	MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
	MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
	MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
	MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
	MLX5_CMD_OP_MAX
};

struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
	u8 vport_svlan_insert[0x1];
	u8 vport_cvlan_insert_if_not_exist[0x1];
	u8 vport_cvlan_insert_overwrite[0x1];
	u8 reserved_at_5[0x1b];
	u8 reserved_at_5[0x19];
	u8 nic_vport_node_guid_modify[0x1];
	u8 nic_vport_port_guid_modify[0x1];

	u8 reserved_at_20[0x7e0];
};
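
A note on the arithmetic in the e_switch_cap hunk above: the two new single-bit capabilities are carved out of the reserved space, which shrinks from 0x1b (27 bits) to 0x19 (25 bits), so the word still totals 5 + 25 + 1 + 1 = 32 bits (the preceding fields occupy bits 0-4, hence reserved_at_5). The modify_nic_vport_field_select hunk further below follows the same pattern, shrinking reserved_at_0 from 0x19 to 0x16 to make room for three new single-bit fields.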

@@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};

struct mlx5_ifc_modify_nic_vport_field_select_bits {
	u8 reserved_at_0[0x19];
	u8 reserved_at_0[0x16];
	u8 node_guid[0x1];
	u8 port_guid[0x1];
	u8 reserved_at_18[0x1];
	u8 mtu[0x1];
	u8 change_event[0x1];
	u8 promisc[0x1];

@@ -460,10 +460,9 @@ struct mlx5_core_qp {
};

struct mlx5_qp_path {
	u8 fl;
	u8 fl_free_ar;
	u8 rsvd3;
	u8 free_ar;
	u8 pkey_index;
	__be16 pkey_index;
	u8 rsvd0;
	u8 grh_mlid;
	__be16 rlid;
@@ -560,6 +559,7 @@ struct mlx5_modify_qp_mbox_in {
	__be32 optparam;
	u8 rsvd0[4];
	struct mlx5_qp_context ctx;
	u8 rsvd2[16];
};

struct mlx5_modify_qp_mbox_out {

@@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,

@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT		0x2000
#define LOOKUP_EMPTY		0x4000

extern int path_pts(struct path *path);

extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);

static inline int user_path_at(int dfd, const char __user *name, unsigned flags,

@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;

static inline bool page_is_young(struct page *page)
{
	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}

static inline void set_page_young(struct page *page)
{
	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return;

	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}

static inline bool test_and_clear_page_young(struct page *page)
{
	return test_and_clear_bit(PAGE_EXT_YOUNG,
				  &lookup_page_ext(page)->flags);
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return false;

	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}

static inline bool page_is_idle(struct page *page)
{
	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}

static inline void set_page_idle(struct page *page)
{
	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return;

	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
}

static inline void clear_page_idle(struct page *page)
{
	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return;

	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
#endif /* CONFIG_64BIT */
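
The page_idle.h changes above all follow one pattern: lookup_page_ext() may return NULL (for example when the page_ext array was never allocated), so every helper now bails out instead of dereferencing the result. A stand-alone sketch of the same defensive-lookup idea, with hypothetical names (not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct ext {
		unsigned long flags;
	};

	/* May return NULL, like lookup_page_ext() when the extension table is absent. */
	static struct ext *lookup_ext(struct ext *table, int idx)
	{
		return table ? &table[idx] : NULL;
	}

	static bool ext_flag_is_set(struct ext *table, int idx, unsigned long mask)
	{
		struct ext *e = lookup_ext(table, idx);

		if (!e)		/* the new guard: fail soft instead of dereferencing NULL */
			return false;

		return (e->flags & mask) != 0;
	}

	int main(void)
	{
		struct ext table[1] = { { .flags = 0x2 } };

		printf("%d\n", ext_flag_is_set(table, 0, 0x2));	/* 1 */
		printf("%d\n", ext_flag_is_set(NULL, 0, 0x2));	/* 0, no crash */
		return 0;
	}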

@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];

/**
 * struct reservation_object_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: table of shared fences
 * @shared_max: for growing shared fence table
 * @shared: shared fence table
 */
struct reservation_object_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct fence __rcu *shared[];
};

/**
 * struct reservation_object - a reservation object manages fences for a buffer
 * @lock: update side lock
 * @seq: sequence count for managing RCU read-side synchronization
 * @fence_excl: the exclusive fence, if there is one currently
 * @fence: list of current shared fences
 * @staged: staged copy of shared fences for RCU updates
 */
struct reservation_object {
	struct ww_mutex lock;
	seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
#define reservation_object_assert_held(obj) \
	lockdep_assert_held(&(obj)->lock.base)

/**
 * reservation_object_init - initialize a reservation object
 * @obj: the reservation object
 */
static inline void
reservation_object_init(struct reservation_object *obj)
{
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
	obj->staged = NULL;
}

/**
 * reservation_object_fini - destroys a reservation object
 * @obj: the reservation object
 */
static inline void
reservation_object_fini(struct reservation_object *obj)
{
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
	ww_mutex_destroy(&obj->lock);
}

/**
 * reservation_object_get_list - get the reservation object's
 * shared fence list, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the shared fence list. Does NOT take references to
 * the fence. The obj->lock must be held.
 */
static inline struct reservation_object_list *
reservation_object_get_list(struct reservation_object *obj)
{
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
			      reservation_object_held(obj));
}

/**
 * reservation_object_get_excl - get the reservation object's
 * exclusive fence, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any). Does NOT take a
 * reference. The obj->lock must be held.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct fence *
reservation_object_get_excl(struct reservation_object *obj)
{
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj)
			      reservation_object_held(obj));
}

/**
 * reservation_object_get_excl_rcu - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments it's
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{

@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv,
	struct timespec64 ts64;

	if (!tv)
		return do_sys_settimeofday64(NULL, tz);

	if (!timespec_valid(tv))
		return -EINVAL;

	ts64 = timespec_to_timespec64(*tv);
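
The early return added above lets a NULL tv pass straight through (the caller only wants to set the timezone) instead of being fed to timespec_valid(). From user space this corresponds to calling settimeofday() with a NULL time argument; a minimal sketch follows (the timezone argument is obsolete on Linux and the call needs CAP_SYS_TIME, so this is shown only to illustrate the NULL-tv path):

	#include <stdio.h>
	#include <sys/time.h>

	int main(void)
	{
		struct timezone tz = { .tz_minuteswest = 0, .tz_dsttime = 0 };

		/* NULL timeval: leave the clock untouched, only hand the timezone to the kernel. */
		if (settimeofday(NULL, &tz) != 0)
			perror("settimeofday");
		return 0;
	}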