Merge branch 'devel-stable' into for-next

Russell King
2014-04-04 00:33:49 +01:00
1043 changed files with 15905 additions and 8988 deletions


@@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio)
 	struct bio_vec bv;
 	struct bvec_iter iter;
 
+	/*
+	 * We special case discard/write same, because they interpret bi_size
+	 * differently:
+	 */
+
+	if (bio->bi_rw & REQ_DISCARD)
+		return 1;
+
+	if (bio->bi_rw & REQ_WRITE_SAME)
+		return 1;
+
 	bio_for_each_segment(bv, bio, iter)
 		segs++;
@@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 extern struct bio_set *fs_bio_set;
+unsigned int bio_integrity_tag_size(struct bio *bio);
 
 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {


@@ -83,6 +83,8 @@ struct blk_mq_ops {
 	 */
 	rq_timed_out_fn		*timeout;
 
+	softirq_done_fn		*complete;
+
 	/*
 	 * Override for hctx allocations (should probably go)
 	 */
@@ -119,11 +121,12 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request_queue *, struct request *,
+		bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
@@ -133,6 +136,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 void blk_mq_end_io(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
+
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);


@@ -98,7 +98,7 @@ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		struct work_struct mq_flush_data;
+		struct work_struct mq_flush_work;
 	};
 
 	struct request_queue *q;
@@ -448,13 +448,8 @@ struct request_queue {
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
-	union {
-		struct request	flush_rq;
-		struct {
-			spinlock_t mq_flush_lock;
-			struct work_struct mq_flush_work;
-		};
-	};
+	struct request		*flush_rq;
+	spinlock_t		mq_flush_lock;
 
 	struct mutex		sysfs_lock;


@@ -11,7 +11,9 @@
 #define CAN_SKB_H
 
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <linux/can.h>
+#include <net/sock.h>
 
 /*
  * The struct can_skb_priv is used to transport additional information along
@@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
 	skb_reserve(skb, sizeof(struct can_skb_priv));
 }
 
+static inline void can_skb_destructor(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+
+static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+{
+	if (sk) {
+		sock_hold(sk);
+		skb->destructor = can_skb_destructor;
+		skb->sk = sk;
+	}
+}
+
+/*
+ * returns an unshared skb owned by the original sock to be echo'ed back
+ */
+static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+{
+	if (skb_shared(skb)) {
+		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (likely(nskb)) {
+			can_skb_set_owner(nskb, skb->sk);
+			consume_skb(skb);
+			return nskb;
+		} else {
+			kfree_skb(skb);
+			return NULL;
+		}
+	}
+
+	/* we can assume to have an unshared skb with proper owner */
+	return skb;
+}
+
 #endif /* CAN_SKB_H */
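
A minimal usage sketch (not part of this diff; the driver context my_can_start_xmit is hypothetical) of the new helper in an echo path:

#include <linux/can/skb.h>
#include <linux/netdevice.h>

static netdev_tx_t my_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Replace a possibly shared skb with an unshared clone that still
	 * carries the original owning socket, so the echo reaches the sender. */
	skb = can_create_echo_skb(skb);
	if (!skb)
		return NETDEV_TX_OK;	/* clone failed; original already freed */

	/* ... queue skb for local echo once the hardware confirms TX ... */
	return NETDEV_TX_OK;
}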


@@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op);
 /*
  * Ceph setxattr request flags.
  */
-#define CEPH_XATTR_CREATE  1
-#define CEPH_XATTR_REPLACE 2
+#define CEPH_XATTR_CREATE  (1 << 0)
+#define CEPH_XATTR_REPLACE (1 << 1)
+#define CEPH_XATTR_REMOVE  (1 << 31)
 
 union ceph_mds_request_args {
 	struct {


@@ -166,6 +166,8 @@ struct cgroup {
 	 *
 	 * The ID of the root cgroup is always 0, and a new cgroup
 	 * will be assigned with a smallest available ID.
+	 *
+	 * Allocating/Removing ID must be protected by cgroup_mutex.
 	 */
 	int id;


@@ -75,11 +75,7 @@
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
-#if GCC_VERSION <= 40801
-# define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
-#else
-# define asm_volatile_goto(x...)	do { asm goto(x); } while (0)
-#endif
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
 
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
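
For context, a sketch of what asm goto does (x86 assembly assumed; always_true is a hypothetical example, not kernel code). The quirk, a trailing empty asm(""), reportedly works around an unresolved gcc miscompilation bug and is now applied to all gcc 4.x versions:

#include <linux/compiler.h>

static inline bool always_true(void)
{
	/* Inline assembly that may branch to a C label. */
	asm_volatile_goto("jmp %l[t_yes]" : : : : t_yes);
	return false;
t_yes:
	return true;
}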


@@ -171,7 +171,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
 			       size_t size, int flags, const char *);
 
 #define dma_buf_export(priv, ops, size, flags)	\
-	dma_buf_export_named(priv, ops, size, flags, __FILE__)
+	dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
 
 int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);


@@ -99,7 +99,7 @@ struct fsnotify_ops {
 			    struct fsnotify_mark *inode_mark,
 			    struct fsnotify_mark *vfsmount_mark,
 			    u32 mask, void *data, int data_type,
-			    const unsigned char *file_name);
+			    const unsigned char *file_name, u32 cookie);
 	void (*free_group_priv)(struct fsnotify_group *group);
 	void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
 	void (*free_event)(struct fsnotify_event *event);
@@ -160,7 +160,7 @@ struct fsnotify_group {
 	struct fasync_struct *fsn_fa;		/* async notification */
 
-	struct fsnotify_event overflow_event;	/* Event we queue when the
+	struct fsnotify_event *overflow_event;	/* Event we queue when the
 						 * notification list is too
 						 * full */


@@ -4,8 +4,6 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 
-#ifdef CONFIG_GPIOLIB
-
 struct device;
 struct gpio_chip;
@@ -18,6 +16,8 @@ struct gpio_chip;
  */
 struct gpio_desc;
 
+#ifdef CONFIG_GPIOLIB
+
 /* Acquire and dispose GPIOs */
 struct gpio_desc *__must_check gpiod_get(struct device *dev,
 					 const char *con_id);


@@ -875,7 +875,7 @@ struct vmbus_channel_relid_released {
 struct vmbus_channel_initiate_contact {
 	struct vmbus_channel_message_header header;
 	u32 vmbus_version_requested;
-	u32 padding2;
+	u32 target_vcpu; /* The VCPU the host should respond to */
 	u64 interrupt_page;
 	u64 monitor_page1;
 	u64 monitor_page2;


@@ -158,6 +158,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
 				devname, dev_id);
 }
 
+extern int __must_check
+devm_request_any_context_irq(struct device *dev, unsigned int irq,
+		irq_handler_t handler, unsigned long irqflags,
+		const char *devname, void *dev_id);
+
 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 
 /*
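
A usage sketch for the new helper (my_probe and my_handler are hypothetical); like request_any_context_irq(), it is assumed to return a negative errno on failure and IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success:

#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, unsigned int irq)
{
	int ret = devm_request_any_context_irq(dev, irq, my_handler,
					       IRQF_TRIGGER_LOW, "my-dev", dev);
	if (ret < 0)
		return ret;
	return 0;	/* the irq is freed automatically on driver detach */
}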


@@ -118,9 +118,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
  * the new maximum will handle anyone else. I may have to revisit this
  * in the future.
  */
-#define MIN_QUEUESMAX			1
 #define DFLT_QUEUESMAX		      256
-#define HARD_QUEUESMAX		     1024
 #define MIN_MSGMAX			1
 #define DFLT_MSG			10U
 #define DFLT_MSGMAX			10


@@ -249,7 +249,8 @@ void kernfs_notify(struct kernfs_node *kn);
 const void *kernfs_super_ns(struct super_block *sb);
 struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
-			       struct kernfs_root *root, const void *ns);
+			       struct kernfs_root *root, bool *new_sb_created,
+			       const void *ns);
 void kernfs_kill_sb(struct super_block *sb);
 
 void kernfs_init(void);
@@ -317,7 +318,7 @@ static inline const void *kernfs_super_ns(struct super_block *sb)
 static inline struct dentry *
 kernfs_mount_ns(struct file_system_type *fs_type, int flags,
-		struct kernfs_root *root, const void *ns)
+		struct kernfs_root *root, bool *new_sb_created, const void *ns)
 { return ERR_PTR(-ENOSYS); }
 
 static inline void kernfs_kill_sb(struct super_block *sb) { }
@@ -368,9 +369,9 @@ static inline int kernfs_remove_by_name(struct kernfs_node *parent,
 static inline struct dentry *
 kernfs_mount(struct file_system_type *fs_type, int flags,
-	     struct kernfs_root *root)
+	     struct kernfs_root *root, bool *new_sb_created)
 {
-	return kernfs_mount_ns(fs_type, flags, root, NULL);
+	return kernfs_mount_ns(fs_type, flags, root, new_sb_created, NULL);
 }
 
 #endif	/* __LINUX_KERNFS_H */
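
A sketch of why the new bool *new_sb_created argument exists (my_fs_mount is hypothetical): a caller can now tell whether kernfs created a fresh superblock or reused an existing one, and run one-time setup only in the former case:

#include <linux/kernfs.h>
#include <linux/err.h>

static struct dentry *my_fs_mount(struct file_system_type *fs_type,
				  int flags, struct kernfs_root *root)
{
	bool new_sb;
	struct dentry *dentry;

	dentry = kernfs_mount(fs_type, flags, root, &new_sb);
	if (!IS_ERR(dentry) && new_sb) {
		/* ... one-time per-superblock initialization ... */
	}
	return dentry;
}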


@@ -387,7 +387,7 @@ struct max8997_dev {
 	struct i2c_client *muic; /* slave addr 0x4a */
 	struct mutex iolock;
 
-	int type;
+	unsigned long type;
 	struct platform_device *battery; /* battery control (not fuel gauge) */
 
 	int irq;


@@ -163,7 +163,7 @@ struct max8998_dev {
 	int ono;
 	u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
 	u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
-	int type;
+	unsigned long type;
 	bool wakeup;
 };


@@ -252,7 +252,7 @@ struct tps65217_board {
 struct tps65217 {
 	struct device *dev;
 	struct tps65217_board *pdata;
-	unsigned int id;
+	unsigned long id;
 	struct regulator_desc desc[TPS65217_NUM_REGULATOR];
 	struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
 	struct regmap *regmap;
@@ -263,7 +263,7 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
 	return dev_get_drvdata(dev);
 }
 
-static inline int tps65217_chip_id(struct tps65217 *tps65217)
+static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
 {
 	return tps65217->id;
 }


@@ -38,8 +38,10 @@
 #include <linux/pci.h>
 #include <linux/spinlock_types.h>
 #include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
 	 * protect uuar allocation data structs
 	 */
 	struct mutex lock;
+	u32 ver;
 };
 
 struct mlx5_bf {


@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
 	unsigned char id_len;
 };
 
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+				       struct sk_buff *skb);
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
 *	Required can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
-*                         void *accel_priv);
+*                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to when device supports multiple
 *	transmit queues.
 *
@@ -1005,7 +1008,8 @@ struct net_device_ops {
 						      struct net_device *dev);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    void *accel_priv);
+						    void *accel_priv,
+						    select_queue_fallback_t fallback);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
 				    void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -2275,6 +2278,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 }
 
+/**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+				     dev->name, queue_index,
+				     dev->real_num_tx_queues);
+		return 0;
+	}
+	return queue_index;
+}
+
 /**
  * netif_running - test if up
  * @dev: network device
@@ -3068,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+					 const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+	return netif_skb_dev_features(skb, skb->dev);
+}
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
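
A sketch of the updated ndo_select_queue contract (my_select_queue and its priority rule are hypothetical): a driver either picks a queue itself, clamped by netdev_cap_txqueue(), or defers to the core's fallback:

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv, select_queue_fallback_t fallback)
{
	/* Steer control traffic to the last queue, clamped if out of range. */
	if (skb->priority == TC_PRIO_CONTROL)
		return netdev_cap_txqueue(dev, dev->real_num_tx_queues - 1);

	/* Otherwise use the core's default (hash-based) selection. */
	return fallback(dev, skb);
}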


@@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np)
 extern struct device_node *of_find_node_by_name(struct device_node *from,
 	const char *name);
-#define for_each_node_by_name(dn, name) \
-	for (dn = of_find_node_by_name(NULL, name); dn; \
-	     dn = of_find_node_by_name(dn, name))
 extern struct device_node *of_find_node_by_type(struct device_node *from,
 	const char *type);
-#define for_each_node_by_type(dn, type) \
-	for (dn = of_find_node_by_type(NULL, type); dn; \
-	     dn = of_find_node_by_type(dn, type))
 extern struct device_node *of_find_compatible_node(struct device_node *from,
 	const char *type, const char *compat);
-#define for_each_compatible_node(dn, type, compatible) \
-	for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
-	     dn = of_find_compatible_node(dn, type, compatible))
 extern struct device_node *of_find_matching_node_and_match(
 	struct device_node *from,
 	const struct of_device_id *matches,
 	const struct of_device_id **match);
-static inline struct device_node *of_find_matching_node(
-	struct device_node *from,
-	const struct of_device_id *matches)
-{
-	return of_find_matching_node_and_match(from, matches, NULL);
-}
-#define for_each_matching_node(dn, matches) \
-	for (dn = of_find_matching_node(NULL, matches); dn; \
-	     dn = of_find_matching_node(dn, matches))
-#define for_each_matching_node_and_match(dn, matches, match) \
-	for (dn = of_find_matching_node_and_match(NULL, matches, match); \
-	     dn; dn = of_find_matching_node_and_match(dn, matches, match))
 extern struct device_node *of_find_node_by_path(const char *path);
 extern struct device_node *of_find_node_by_phandle(phandle handle);
 extern struct device_node *of_get_parent(const struct device_node *node);
@@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child(
 extern struct device_node *of_get_child_by_name(const struct device_node *node,
 					const char *name);
-#define for_each_child_of_node(parent, child) \
-	for (child = of_get_next_child(parent, NULL); child != NULL; \
-	     child = of_get_next_child(parent, child))
-
-#define for_each_available_child_of_node(parent, child) \
-	for (child = of_get_next_available_child(parent, NULL); child != NULL; \
-	     child = of_get_next_available_child(parent, child))
-
-static inline int of_get_child_count(const struct device_node *np)
-{
-	struct device_node *child;
-	int num = 0;
-
-	for_each_child_of_node(np, child)
-		num++;
-
-	return num;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
-	struct device_node *child;
-	int num = 0;
-
-	for_each_available_child_of_node(np, child)
-		num++;
-
-	return num;
-}
-
 /* cache lookup */
 extern struct device_node *of_find_next_cache_node(const struct device_node *);
 extern struct device_node *of_find_node_with_property(
 	struct device_node *from, const char *prop_name);
-#define for_each_node_with_property(dn, prop_name) \
-	for (dn = of_find_node_with_property(NULL, prop_name); dn; \
-	     dn = of_find_node_with_property(dn, prop_name))
 
 extern struct property *of_find_property(const struct device_node *np,
 					 const char *name,
@@ -367,27 +315,48 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from,
 	return NULL;
 }
 
+static inline struct device_node *of_find_node_by_type(struct device_node *from,
+	const char *type)
+{
+	return NULL;
+}
+
+static inline struct device_node *of_find_matching_node_and_match(
+	struct device_node *from,
+	const struct of_device_id *matches,
+	const struct of_device_id **match)
+{
+	return NULL;
+}
+
 static inline struct device_node *of_get_parent(const struct device_node *node)
 {
 	return NULL;
 }
 
+static inline struct device_node *of_get_next_child(
+	const struct device_node *node, struct device_node *prev)
+{
+	return NULL;
+}
+
+static inline struct device_node *of_get_next_available_child(
+	const struct device_node *node, struct device_node *prev)
+{
+	return NULL;
+}
+
+static inline struct device_node *of_find_node_with_property(
+	struct device_node *from, const char *prop_name)
+{
+	return NULL;
+}
+
 static inline bool of_have_populated_dt(void)
 {
 	return false;
 }
 
-/* Kill an unused variable warning on a device_node pointer */
-static inline void __of_use_dn(const struct device_node *np)
-{
-}
-
-#define for_each_child_of_node(parent, child) \
-	while (__of_use_dn(parent), __of_use_dn(child), 0)
-
-#define for_each_available_child_of_node(parent, child) \
-	while (0)
-
 static inline struct device_node *of_get_child_by_name(
 					const struct device_node *node,
 					const char *name)
@@ -395,16 +364,6 @@ static inline struct device_node *of_get_child_by_name(
 	return NULL;
 }
 
-static inline int of_get_child_count(const struct device_node *np)
-{
-	return 0;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
-	return 0;
-}
-
 static inline int of_device_is_compatible(const struct device_node *device,
 					  const char *name)
 {
@@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np);
 static inline int of_node_to_nid(struct device_node *device) { return 0; }
 #endif
 
+static inline struct device_node *of_find_matching_node(
+	struct device_node *from,
+	const struct of_device_id *matches)
+{
+	return of_find_matching_node_and_match(from, matches, NULL);
+}
+
 /**
  * of_property_read_bool - Find a property
  * @np: device node from which the property value is to be read.
@@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np,
 	       s;						\
 	       s = of_prop_next_string(prop, s))
 
+#define for_each_node_by_name(dn, name) \
+	for (dn = of_find_node_by_name(NULL, name); dn; \
+	     dn = of_find_node_by_name(dn, name))
+
+#define for_each_node_by_type(dn, type) \
+	for (dn = of_find_node_by_type(NULL, type); dn; \
+	     dn = of_find_node_by_type(dn, type))
+
+#define for_each_compatible_node(dn, type, compatible) \
+	for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+	     dn = of_find_compatible_node(dn, type, compatible))
+
+#define for_each_matching_node(dn, matches) \
+	for (dn = of_find_matching_node(NULL, matches); dn; \
+	     dn = of_find_matching_node(dn, matches))
+
+#define for_each_matching_node_and_match(dn, matches, match) \
+	for (dn = of_find_matching_node_and_match(NULL, matches, match); \
+	     dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
+#define for_each_child_of_node(parent, child) \
+	for (child = of_get_next_child(parent, NULL); child != NULL; \
+	     child = of_get_next_child(parent, child))
+
+#define for_each_available_child_of_node(parent, child) \
+	for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+	     child = of_get_next_available_child(parent, child))
+
+#define for_each_node_with_property(dn, prop_name) \
+	for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+	     dn = of_find_node_with_property(dn, prop_name))
+
+static inline int of_get_child_count(const struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_child_of_node(np, child)
+		num++;
+
+	return num;
+}
+
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_available_child_of_node(np, child)
+		num++;
+
+	return num;
+}
+
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
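
With the iterators now defined once, outside the CONFIG_OF guard, in terms of functions that have NULL stubs for !CONFIG_OF, code like the following sketch (my_ids and my_scan are hypothetical) compiles in both configurations and simply iterates zero times without OF:

#include <linux/of.h>
#include <linux/kernel.h>

static const struct of_device_id my_ids[] = {
	{ .compatible = "acme,widget" },
	{ /* sentinel */ }
};

static void my_scan(void)
{
	struct device_node *np;

	/* Each of_find_matching_node() step drops the reference on the
	 * previous node, so the loop handles refcounting itself. */
	for_each_matching_node(np, my_ids)
		pr_info("found %s\n", of_node_full_name(np));
}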


@@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev,
 static inline void of_device_node_put(struct device *dev) { }
 
-static inline const struct of_device_id *of_match_device(
+static inline const struct of_device_id *__of_match_device(
 		const struct of_device_id *matches, const struct device *dev)
 {
 	return NULL;
 }
+#define of_match_device(matches, dev)	\
+	__of_match_device(of_match_ptr(matches), (dev))
 
 static inline struct device_node *of_cpu_device_node_get(int cpu)
 {


@@ -1169,8 +1169,23 @@ void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 void pci_restore_msi_state(struct pci_dev *dev);
 int pci_msi_enabled(void);
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{
+	int rc = pci_enable_msi_range(dev, nvec, nvec);
+	if (rc < 0)
+		return rc;
+	return 0;
+}
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 			  int minvec, int maxvec);
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+					struct msix_entry *entries, int nvec)
+{
+	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
+	if (rc < 0)
+		return rc;
+	return 0;
+}
 #else
 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
@@ -1189,9 +1204,14 @@ static inline int pci_msi_enabled(void) { return 0; }
 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
 				       int maxvec)
 { return -ENOSYS; }
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{ return -ENOSYS; }
 static inline int pci_enable_msix_range(struct pci_dev *dev,
 		      struct msix_entry *entries, int minvec, int maxvec)
 { return -ENOSYS; }
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+		      struct msix_entry *entries, int nvec)
+{ return -ENOSYS; }
 #endif
 
 #ifdef CONFIG_PCIEPORTBUS
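
A sketch of the calling convention the new helpers enable (my_setup_msix is hypothetical): the *_exact() variants request an exact vector count and fold the "fewer vectors than requested" case into plain success or failure:

#include <linux/pci.h>

static int my_setup_msix(struct pci_dev *pdev, struct msix_entry *entries,
			 int nvec)
{
	int rc = pci_enable_msix_exact(pdev, entries, nvec);
	if (rc < 0)
		return rc;	/* exactly nvec vectors were not available */

	/* ... request_irq() on entries[0..nvec-1].vector ... */
	return 0;
}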


@@ -146,7 +146,9 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
 	phy->attrs.bus_width = bus_width;
 }
 struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
 struct phy *devm_phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
 void phy_put(struct phy *phy);
 void devm_phy_put(struct device *dev, struct phy *phy);
 struct phy *of_phy_simple_xlate(struct device *dev,
@@ -232,11 +234,23 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct phy *phy_optional_get(struct device *dev,
+					   const char *string)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
 static inline struct phy *devm_phy_get(struct device *dev, const char *string)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct phy *devm_phy_optional_get(struct device *dev,
+						const char *string)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
 static inline void phy_put(struct phy *phy)
 {
 }
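
A sketch of the new optional-PHY lookup (my_probe is hypothetical; the assumed semantics, per the "optional" naming, are that a missing PHY yields NULL rather than an error):

#include <linux/phy/phy.h>
#include <linux/err.h>

static int my_probe(struct device *dev)
{
	struct phy *phy = devm_phy_optional_get(dev, "usb");

	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real failure, e.g. -EPROBE_DEFER */
	if (phy)
		phy_init(phy);		/* PHY present: initialize it */
	return 0;			/* no PHY: carry on without one */
}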


@@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
 {
 	return !skb->head_frag || skb_cloned(skb);
 }
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) -
+			       skb_network_header(skb);
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
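
A sketch of the check this helper enables (exceeds_mtu is a hypothetical name, modeled on the forwarding-path use the comment describes): a GSO skb may be longer than the MTU overall while each resulting segment still fits:

#include <linux/skbuff.h>

static bool exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* GSO: compare the per-segment size, not the aggregate length. */
	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}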


@@ -188,6 +188,9 @@ static inline void kick_all_cpus_sync(void) { }
  */
 extern void arch_disable_smp_support(void);
 
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
 void smp_setup_processor_id(void);
 
 #endif /* __LINUX_SMP_H */


@@ -273,7 +273,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 *	message while queuing transfers that arrive in the meantime. When the
 *	driver is finished with this message, it must call
 *	spi_finalize_current_message() so the subsystem can issue the next
-*	transfer
+*	message
 * @unprepare_transfer_hardware: there are currently no more messages on the
 *	queue so the subsystem notifies the driver that it may relax the
 *	hardware by issuing this call
@@ -287,7 +287,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 *                  - return 1 if the transfer is still in progress. When
 *                    the driver is finished with this transfer it must
 *                    call spi_finalize_current_transfer() so the subsystem
-*                    can issue the next transfer
+*                    can issue the next transfer. Note: transfer_one and
+*                    transfer_one_message are mutually exclusive; when both
+*                    are set, the generic subsystem does not call your
+*                    transfer_one callback.
 * @unprepare_message: undo any work done by prepare_message().
 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
 *	number. Any individual value may be -ENOENT for CS lines that
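
A sketch of the rule the new note states (my_transfer_one and my_register are hypothetical): implement either transfer_one_message or transfer_one, because setting both means the core never calls transfer_one:

#include <linux/spi/spi.h>

static int my_transfer_one(struct spi_master *master, struct spi_device *spi,
			   struct spi_transfer *xfer)
{
	/* ... start PIO/DMA for xfer ... */
	return 1;	/* still in progress; spi_finalize_current_transfer()
			 * is called later from the completion handler */
}

static int my_register(struct spi_master *master)
{
	master->transfer_one = my_transfer_one;
	/* Leave transfer_one_message unset so transfer_one is actually used. */
	return spi_register_master(master);
}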


@@ -281,13 +281,15 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
 asmlinkage long sys_sched_setparam(pid_t pid,
 					struct sched_param __user *param);
 asmlinkage long sys_sched_setattr(pid_t pid,
-					struct sched_attr __user *attr);
+					struct sched_attr __user *attr,
+					unsigned int flags);
 asmlinkage long sys_sched_getscheduler(pid_t pid);
 asmlinkage long sys_sched_getparam(pid_t pid,
 					struct sched_param __user *param);
 asmlinkage long sys_sched_getattr(pid_t pid,
 					struct sched_attr __user *attr,
-					unsigned int size);
+					unsigned int size,
+					unsigned int flags);
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 					unsigned long __user *user_mask_ptr);
 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,


@@ -126,6 +126,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
 extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
 #else /* !CONFIG_UPROBES */
 struct uprobes_state {
 };


@@ -1265,8 +1265,6 @@ typedef void (*usb_complete_t)(struct urb *);
 * @sg: scatter gather buffer list, the buffer size of each element in
 *      the list (except the last) must be divisible by the endpoint's
 *      max packet size if no_sg_constraint isn't set in 'struct usb_bus'
-*      (FIXME: scatter-gather under xHCI is broken for periodic transfers.
-*      Do not use urb->sg for interrupt endpoints for now, only bulk.)
 * @num_mapped_sgs: (internal) number of mapped sg entries
 * @num_sgs: number of entries in the sg list
 * @transfer_buffer_length: How big is transfer_buffer. The transfer may


@@ -419,10 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 	static struct lock_class_key __key;			\
 	const char *__lock_name;				\
 								\
-	if (__builtin_constant_p(fmt))				\
-		__lock_name = (fmt);				\
-	else							\
-		__lock_name = #fmt;				\
+	__lock_name = #fmt#args;				\
 								\
 	__alloc_workqueue_key((fmt), (flags), (max_active),	\
 			      &__key, __lock_name, ##args);	\


@@ -97,7 +97,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 				  enum wb_reason reason);
-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
+void sync_inodes_sb(struct super_block *);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);