Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several conflicts here.

The NFP driver bug fix adding an nfp_netdev_is_nfp_repr() check to
nfp_fl_output() needed some adjustments because the code block is in
an else block now.

Parallel additions to net/pkt_cls.h and net/sch_generic.h.

A bug fix in __tcp_retransmit_skb() conflicted with some of the
rbtree changes in net-next.

The tc action RCU callback fixes in 'net' had some overlap with some
of the recent tcf_block reworking.

Signed-off-by: David S. Miller <davem@davemloft.net>
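For context, a rough sketch of the resolved shape in nfp_fl_output(), with the check from 'net' now sitting inside the else branch. This is abbreviated and hypothetical, not the verbatim driver code:

	if (tun_type) {
		/* tunnel egress: encode the tunnel output port */
	} else {
		/* the 'net' bug fix: plain output is only valid when
		 * out_dev is backed by an NFP representor */
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;
		/* encode the representor's port id */
	}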
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -77,8 +77,8 @@ void tap_del_queues(struct tap_dev *tap);
 int tap_get_minor(dev_t major, struct tap_dev *tap);
 void tap_free_minor(dev_t major, struct tap_dev *tap);
 int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
-		    dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+		    const char *device_name, struct module *module);
 void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
 
 #endif /*_LINUX_IF_TAP_H_*/
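The extra struct module argument lets the tap core pin the driver that owns the character device, so the cdev cannot outlive its module. A caller now passes its own module; macvtap, for instance, does something along these lines (sketch, not guaranteed verbatim):

	err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
			      THIS_MODULE);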
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
 void irq_gc_unmask_enable_reg(struct irq_data *d);
 void irq_gc_ack_set_bit(struct irq_data *d);
 void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
 void irq_gc_eoi(struct irq_data *d);
 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
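irq_gc_mask_disable_and_ack_set() serves generic chips with separate enable/disable registers where a pending interrupt is acknowledged by setting a bit. Roughly, the helper added in kernel/irq/generic-chip.c on the 'net' side looks like this (a sketch, not guaranteed verbatim):

	void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
	{
		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
		struct irq_chip_type *ct = irq_data_get_chip_type(d);
		u32 mask = d->mask;

		irq_gc_lock(gc);
		irq_reg_writel(gc, mask, ct->regs.disable);	/* mask */
		*ct->mask_cache &= ~mask;
		irq_reg_writel(gc, mask, ct->regs.ack);		/* ack */
		irq_gc_unlock(gc);
	}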
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -372,6 +372,8 @@
 #define GITS_BASER_ENTRY_SIZE_SHIFT	(48)
 #define GITS_BASER_ENTRY_SIZE(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_BASER_ENTRY_SIZE_MASK	GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys) \
+	(((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
 #define GITS_BASER_SHAREABILITY_SHIFT	(10)
 #define GITS_BASER_InnerShareable \
 	GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
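GITS_BASER_PHYS_52_to_48() folds bits [51:48] of a 52-bit physical address into register bits [15:12], since the GITS_BASER layout has no room for them at their natural position. A self-contained userspace demonstration of the bit math (the address value is made up):

	#include <stdint.h>
	#include <stdio.h>

	/* local stand-in for the kernel's GENMASK_ULL() */
	#define GENMASK_ULL(h, l) \
		((~0ULL << (l)) & (~0ULL >> (63 - (h))))
	#define GITS_BASER_PHYS_52_to_48(phys) \
		(((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)

	int main(void)
	{
		uint64_t phys = 0xa000123450000ULL;	/* 52-bit address */

		/* bits [47:16] stay put; bits [51:48] (0xa) land at [15:12] */
		printf("0x%llx\n",
		       (unsigned long long)GITS_BASER_PHYS_52_to_48(phys));
		return 0;	/* prints 0x12345a000 */
	}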
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
 			    u8 prio, u8 *tc);
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
 			      u8 tc, u8 *bw_pct);
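mlx5_query_port_tc_group() is the read-side counterpart of mlx5_set_port_tc_group(). A hypothetical caller querying the group of every traffic class (IEEE_8021QAZ_MAX_TCS is the standard dcbnl bound of 8):

	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	int err, tc;

	for (tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++) {
		err = mlx5_query_port_tc_group(mdev, tc, &tc_group[tc]);
		if (err)
			return err;
	}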
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -27,16 +27,17 @@ enum pm_qos_flags_status {
 	PM_QOS_FLAGS_ALL,
 };
 
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE (-1)
+#define PM_QOS_LATENCY_ANY S32_MAX
 
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
 #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
-#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
 
 #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
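The removed open-coded PM_QOS_LATENCY_ANY is exactly S32_MAX, so switching to the named constant changes nothing at runtime. A quick self-contained check of that equivalence:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* the old open-coded value: all-ones shifted right once */
		int32_t old_any = (int32_t)(~(uint32_t)0 >> 1);

		printf("%d %d\n", old_any, INT32_MAX);	/* both 2147483647 */
		return 0;
	}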
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -231,7 +231,7 @@ struct sctp_datahdr {
 	__be32 tsn;
 	__be16 stream;
 	__be16 ssn;
-	__be32 ppid;
+	__u32 ppid;
 	__u8 payload[0];
 };
 
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
 
 struct sctp_strreset_outreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u32 response_seq;
-	__u32 send_reset_at_tsn;
-	__u16 list_of_streams[0];
+	__be32 request_seq;
+	__be32 response_seq;
+	__be32 send_reset_at_tsn;
+	__be16 list_of_streams[0];
 };
 
 struct sctp_strreset_inreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u16 list_of_streams[0];
+	__be32 request_seq;
+	__be16 list_of_streams[0];
 };
 
 struct sctp_strreset_tsnreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
+	__be32 request_seq;
 };
 
 struct sctp_strreset_addstrm {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u16 number_of_streams;
-	__u16 reserved;
+	__be32 request_seq;
+	__be16 number_of_streams;
+	__be16 reserved;
 };
 
 enum {
@@ -752,16 +752,16 @@ enum {
 
 struct sctp_strreset_resp {
 	struct sctp_paramhdr param_hdr;
-	__u32 response_seq;
-	__u32 result;
+	__be32 response_seq;
+	__be32 result;
 };
 
 struct sctp_strreset_resptsn {
 	struct sctp_paramhdr param_hdr;
-	__u32 response_seq;
-	__u32 result;
-	__u32 senders_next_tsn;
-	__u32 receivers_next_tsn;
+	__be32 response_seq;
+	__be32 result;
+	__be32 senders_next_tsn;
+	__be32 receivers_next_tsn;
 };
 
 #endif /* __LINUX_SCTP_H__ */
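The __u32/__u16 to __be32/__be16 changes are annotation fixes: these stream-reset fields carry big-endian wire values, and the __be types let sparse flag any access that skips cpu_to_be32()/be32_to_cpu(). (ppid moves the other way, to plain __u32, the stack apparently treating it as an opaque value.) A userspace analogue of the conversion discipline, using htonl()/ntohl():

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	struct tsnreq {
		uint32_t request_seq;	/* big-endian on the wire, like __be32 */
	};

	int main(void)
	{
		struct tsnreq req;

		req.request_seq = htonl(12345);	/* cpu_to_be32() in the kernel */
		printf("wire 0x%08x, host %u\n",
		       req.request_seq, ntohl(req.request_seq));
		return 0;
	}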
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -9,13 +9,16 @@
 /*
  * Simple wait queues
  *
- * While these are very similar to the other/complex wait queues (wait.h) the
- * most important difference is that the simple waitqueue allows for
- * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
- * times.
+ * While these are very similar to regular wait queues (wait.h) the most
+ * important difference is that the simple waitqueue allows for deterministic
+ * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
  *
- * In order to make this so, we had to drop a fair number of features of the
- * other waitqueue code; notably:
+ * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
+ * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
+ * priority task a chance to run.
+ *
+ * Secondly, we had to drop a fair number of features of the other waitqueue
+ * code; notably:
  *
  *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
  *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
@@ -24,12 +27,14 @@
  *  - the exclusive mode; because this requires preserving the list order
  *    and this is hard.
  *
- *  - custom wake functions; because you cannot give any guarantees about
- *    random code.
+ *  - custom wake callback functions; because you cannot give any guarantees
+ *    about random code. This also allows swait to be used in RT, such that
+ *    raw spinlock can be used for the swait queue head.
  *
- * As a side effect of this; the data structures are slimmer.
- *
- * One would recommend using this wait queue where possible.
+ * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
+ * For all the above, note that simple wait queues should _only_ be used under
+ * very specific realtime constraints -- it is best to stick with the regular
+ * wait queues in most cases.
  */
 
 struct task_struct;
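Typical usage of the simple waitqueue API from this era, sketched as a kernel-context fragment (all wakeups are TASK_NORMAL; there is no exclusive mode and no custom wake callbacks):

	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
	static bool done;

	/* waiter: sleeps interruptibly until done becomes true */
	int ret = swait_event_interruptible(my_wq, done);

	/* waker: wakes a single waiter; uses a raw spinlock internally */
	done = true;
	swake_up(&my_wq);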