Merge tag 'v3.19-rc5' into next
Linux 3.19-rc5

Conflicts:
	drivers/usb/dwc2/gadget.c
	drivers/usb/gadget/udc/bdc/bdc_ep.c
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
 	unsigned long		flags;		/* BLK_MQ_F_* flags */
 
 	struct request_queue	*queue;
-	unsigned int		queue_num;
 	struct blk_flush_queue	*fq;
 
 	void			*driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		numa_node;
-	unsigned int		cmd_size;	/* per-request extra data */
+	unsigned int		queue_num;
 
 	atomic_t		nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
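The three declarations added here (blk_mq_request_started(), blk_mq_cancel_requeue_work(), blk_mq_abort_requeue_list()) were exported so a driver can tear down in-flight I/O, e.g. across an NVMe controller reset. A minimal sketch of the intended call pattern, assuming only the prototypes above — the wrapper function is invented for illustration:

    /* Stop the deferred requeue kick, then fail whatever is still
     * parked on the requeue list. Only the blk_mq_* calls are from
     * the header; the surrounding function is hypothetical.
     */
    static void example_teardown(struct request_queue *q)
    {
    	blk_mq_cancel_requeue_work(q);	/* no more deferred kicks */
    	blk_mq_abort_requeue_list(q);	/* complete parked requests with an error */
    }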
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
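The comment cut off at the end of this hunk describes the PDU layout: driver command data sits immediately behind the struct request, so the header converts between the two with pointer arithmetic. The 3.19-era helpers in this same header look like this, shown for context:

    static inline struct request *blk_mq_rq_from_pdu(void *pdu)
    {
    	return pdu - sizeof(struct request);	/* step back over the request */
    }

    static inline void *blk_mq_rq_to_pdu(struct request *rq)
    {
    	return rq + 1;				/* PDU begins right after it */
    }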
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
 	__REQ_PM,		/* runtime pm request */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
+	__REQ_NO_TIMEOUT,	/* requests may never expire */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
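REQ_NO_TIMEOUT lets a driver mark a request that the block-layer timeout scan must skip. A hedged sketch of the check — the flag test is the idiom the flag exists for, but the function here is illustrative, not the upstream code:

    static void example_check_expired(struct request *rq)
    {
    	if (rq->cmd_flags & REQ_NO_TIMEOUT)	/* request may never expire */
    		return;
    	/* ... normal deadline handling ... */
    }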
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int size)
 	}
 }
 
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
 	switch (size) {
 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
 #define READ_ONCE(x) \
 	({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
-#define ASSIGN_ONCE(val, x) \
-	({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+	({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
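The rename from ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val) keeps the documented use case intact: one side publishes a value, the other polls it, and the macros stop the compiler from caching, merging, or tearing the accesses. A minimal sketch of that lockless-flag pattern (the example names are invented; cpu_relax() is assumed available in this context):

    static int done_flag;		/* shared with an interrupt handler */

    void example_complete(void)		/* writer, e.g. IRQ context */
    {
    	WRITE_ONCE(done_flag, 1);
    }

    void example_wait(void)		/* reader: the load is redone each pass */
    {
    	while (!READ_ONCE(done_flag))
    		cpu_relax();
    }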
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
@@ -137,6 +137,7 @@ struct sdhci_host {
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)	/* SDR104/HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)	/* Host is using a retuning timer for the card */
 #define SDHCI_USE_64_BIT_DMA	(1<<12)	/* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING	(1<<13)	/* Tuning for HS400 */
 
 	unsigned int version;	/* SDHCI spec. version */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device support VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device support VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
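Both documentation fixes bring the comment in line with the callbacks' actual prototypes, which already took a __be16 protocol argument. A hypothetical driver hookup matching the corrected text (all example_* names are invented):

    static int example_vlan_rx_add_vid(struct net_device *dev,
    				       __be16 proto, u16 vid)
    {
    	/* program the hardware VLAN filter for vid */
    	return 0;
    }

    static const struct net_device_ops example_netdev_ops = {
    	.ndo_vlan_rx_add_vid	= example_vlan_rx_add_vid,
    	/* .ndo_vlan_rx_kill_vid takes the same three arguments */
    };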
@@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock;		/* Device list lock */
 		list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_in_bond_rcu(bond, slave)	\
 		for_each_netdev_rcu(&init_net, slave)	\
-			if (netdev_master_upper_dev_get_rcu(slave) == bond)
+			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
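The added parentheses are ordinary macro hygiene: without them, an expression passed as bond splices into the comparison with surprising precedence. An illustrative fragment only, not from the tree:

    /* Unparenthesized, this would expand to
     *     ... == flag ? bond_a : bond_b)
     * which parses as (... == flag) ? bond_a : bond_b  -- the wrong test.
     */
    for_each_netdev_in_bond_rcu(flag ? bond_a : bond_b, slave)
    	use(slave);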
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
@@ -74,6 +74,9 @@ struct nfs_client {
 	/* idmapper */
 	struct idmap *		cl_idmap;
 
+	/* Client owner identifier */
+	const char *		cl_owner_id;
+
 	/* Our own IP address, as a null-terminated string.
 	 * This is used to generate the mv0 callback address.
 	 */
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
 #define	OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF	0x0
 
 #define	OMAP_CTRL_PCIE_PCS_MASK			0xff
-#define	OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT	0x8
+#define	OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT	16
 
 #define	OMAP_CTRL_USB2_PHY_PD		BIT(28)
 
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
 void omap_control_phy_power(struct device *dev, int on);
 void omap_control_usb_set_mode(struct device *dev,
 			       enum omap_control_usb_mode mode);
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
 #else
 
 static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
 {
 }
 
-static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
 }
 #endif
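With the id parameter gone, omap_control_pcie_pcs() programs only the delay count, matching the corrected 16-bit shift above. A hypothetical caller updated for the new signature (the device pointer and delay value are placeholders, not the real phy-ti-pipe3 code):

    static void example_configure_pcs(struct device *control_dev)
    {
    	u8 delay = 0x96;	/* illustrative delay count */

    	omap_control_pcie_pcs(control_dev, delay);
    }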