Merge branch 'clk-fixes' into clk-next

* clk-fixes: (3 commits)
  clk: ti: dflt: fix enable_reg validity check
  clk: ti: fix dual-registration of uart4_ick
  clk: ti: clk-7xx: Remove hardwired ABE clock configuration
Stephen Boyd committed 2015-10-02 11:29:54 -07:00
884 changed files with 6852 additions and 4020 deletions


@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -252,13 +253,19 @@ int inode_congested(struct inode *inode, int cong_bits);
* @inode: inode of interest
*
* cgroup writeback requires support from both the bdi and filesystem.
* Test whether @inode has both.
* Also, both memcg and iocg have to be on the default hierarchy. Test
* whether all conditions are met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
*/
static inline bool inode_cgwb_enabled(struct inode *inode)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
return bdi_cap_account_dirty(bdi) &&
return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
cgroup_on_dfl(blkcg_root_css->cgroup) &&
bdi_cap_account_dirty(bdi) &&
(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
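
A filesystem opts in to cgroup writeback by setting SB_I_CGROUPWB on its superblock; whether inode_cgwb_enabled() then returns true still depends on the bdi and cgroup conditions above. A minimal sketch (the fill_super name is illustrative, not part of this change):

#include <linux/fs.h>

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
        /* opt in to cgroup writeback; inode_cgwb_enabled() additionally
         * requires memcg/iocg on the default hierarchy and bdi support */
        sb->s_iflags |= SB_I_CGROUPWB;
        return 0;
}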


@@ -1368,6 +1368,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
if (!bio_has_data(prev))
return false;
return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
next->bi_io_vec[0].bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, bio, req->bio);
}
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
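
The new gap helpers give merge code a single check for whether the boundary bvecs of two bios would violate the queue's virtual boundary; a hedged sketch of how a back-merge test might use them (the function name is illustrative):

#include <linux/blkdev.h>

static bool example_back_merge_allowed(struct request *req, struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return false;   /* gap across the merge point, reject the merge */
        /* ... the usual segment count and size checks would follow ... */
        return true;
}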
@@ -1494,6 +1514,26 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
struct bio_integrity_payload *bip = bio_integrity(req->bio);
struct bio_integrity_payload *bip_next = bio_integrity(next);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1560,6 +1600,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
return 0;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio *bio)
{
return false;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */


@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \


@@ -238,6 +238,8 @@ struct ceph_connection {
bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
stamp */
/* message in temps */
struct ceph_msg_header in_hdr;
@@ -248,7 +250,7 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
struct timespec last_keepalive_ack;
struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */


@@ -473,31 +473,8 @@ struct cgroup_subsys {
unsigned int depends_on;
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
* Called from threadgroup_change_begin() and allows cgroup operations to
* synchronize against threadgroup changes using a percpu_rw_semaphore.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
}
/**
* cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
* @tsk: target task
*
* Called from threadgroup_change_end(). Counterpart of
* cgroup_threadcgroup_change_begin().
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
percpu_up_read(&cgroup_threadgroup_rwsem);
}
void cgroup_threadgroup_change_begin(struct task_struct *tsk);
void cgroup_threadgroup_change_end(struct task_struct *tsk);
#else /* CONFIG_CGROUPS */


@@ -18,15 +18,6 @@
struct clock_event_device;
struct module;
/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
enum clock_event_mode {
CLOCK_EVT_MODE_UNUSED,
CLOCK_EVT_MODE_SHUTDOWN,
CLOCK_EVT_MODE_PERIODIC,
CLOCK_EVT_MODE_ONESHOT,
CLOCK_EVT_MODE_RESUME,
};
/*
* Possible states of a clock event device.
*
@@ -86,16 +77,14 @@ enum clock_event_state {
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
* @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
* @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
* @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
* @set_state_periodic: switch state to periodic, if !set_mode
* @set_state_oneshot: switch state to oneshot, if !set_mode
* @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
* @set_state_shutdown: switch state to shutdown, if !set_mode
* @tick_resume: resume clkevt device, if !set_mode
* @set_state_periodic: switch state to periodic
* @set_state_oneshot: switch state to oneshot
* @set_state_oneshot_stopped: switch state to oneshot_stopped
* @set_state_shutdown: switch state to shutdown
* @tick_resume: resume clkevt device
* @broadcast: function to broadcast events
* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@ struct clock_event_device {
u64 min_delta_ns;
u32 mult;
u32 shift;
enum clock_event_mode mode;
enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
/*
* State transition callback(s): Only one of the two groups should be
* defined:
* - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
* - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
*/
void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
int (*set_state_oneshot_stopped)(struct clock_event_device *);
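
With ->set_mode() and enum clock_event_mode gone, drivers describe transitions through the per-state callbacks instead; a minimal sketch with illustrative names:

#include <linux/clockchips.h>

static int example_shutdown(struct clock_event_device *evt)
{
        /* stop the timer hardware */
        return 0;
}

static int example_set_periodic(struct clock_event_device *evt)
{
        /* program the hardware for a periodic tick */
        return 0;
}

static struct clock_event_device example_clockevent = {
        .name                   = "example",
        .features               = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_state_shutdown     = example_shutdown,
        .set_state_periodic     = example_set_periodic,
        /* .set_state_oneshot, .set_next_event, ... as needed */
};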


@@ -127,9 +127,14 @@ struct cpufreq_policy {
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;


@@ -65,7 +65,10 @@ struct devfreq_dev_status {
* The "flags" parameter's possible values are
* explained above with "DEVFREQ_FLAG_*" macros.
* @get_dev_status: The device should provide the current performance
* status to devfreq, which is used by governors.
* status to devfreq. Governors are recommended not to
* use this directly. Instead, governors are recommended
* to use devfreq_update_stats() along with
* devfreq.last_status.
* @get_cur_freq: The device should provide the current frequency
* at which it is operating.
* @exit: An optional callback that is called when devfreq
@@ -161,6 +164,7 @@ struct devfreq {
struct delayed_work work;
unsigned long previous_freq;
struct devfreq_dev_status last_status;
void *data; /* private data for governors */
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
/**
* devfreq_update_stats() - update the last_status pointer in struct devfreq
* @df: the devfreq instance whose status needs updating
*
* Governors are recommended to use this function along with last_status,
* which allows other entities to reuse the last_status without affecting
* the values fetched later by governors.
*/
static inline int devfreq_update_stats(struct devfreq *df)
{
return df->profile->get_dev_status(df->dev.parent, &df->last_status);
}
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq)
{
}
static inline int devfreq_update_stats(struct devfreq *df)
{
return -EINVAL;
}
#endif /* CONFIG_PM_DEVFREQ */
#endif /* __LINUX_DEVFREQ_H__ */
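
A governor is then expected to refresh last_status through devfreq_update_stats() instead of calling ->get_dev_status() itself; a hedged sketch of a get_target_freq() implementation (illustrative, not taken from this change):

#include <linux/devfreq.h>
#include <linux/math64.h>

static int example_get_target_freq(struct devfreq *df, unsigned long *freq)
{
        struct devfreq_dev_status *stat;
        int err;

        err = devfreq_update_stats(df);         /* refreshes df->last_status */
        if (err)
                return err;

        stat = &df->last_status;
        if (stat->total_time)
                *freq = (unsigned long)div_u64((u64)stat->busy_time *
                                               stat->current_frequency,
                                               stat->total_time);
        else
                *freq = stat->current_frequency;
        return 0;
}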


@@ -25,6 +25,13 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#ifdef CONFIG_CGROUPS
#define INIT_GROUP_RWSEM(sig) \
.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
#else
#define INIT_GROUP_RWSEM(sig)
#endif
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;


@@ -110,8 +110,8 @@ enum {
/*
* Return value for chip->irq_set_affinity()
*
* IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
* IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
* IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
* IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendent irqchips.
@@ -129,9 +129,19 @@ struct irq_domain;
* struct irq_common_data - per irq data shared by all irqchips
* @state_use_accessors: status information for irq chip functions.
* Use accessor functions to deal with it
* @node: node index useful for balancing
* @handler_data: per-IRQ data for the irq_chip methods
* @affinity: IRQ affinity on SMP
* @msi_desc: MSI descriptor
*/
struct irq_common_data {
unsigned int state_use_accessors;
#ifdef CONFIG_NUMA
unsigned int node;
#endif
void *handler_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
};
/**
@@ -139,38 +149,26 @@ struct irq_common_data {
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
* @node: node index useful for balancing
* @common: point to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
* @parent_data: pointer to parent struct irq_data to support hierarchy
* irq_domain
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
* @msi_desc: MSI descriptor
* @affinity: IRQ affinity on SMP
*
* The fields here need to overlay the ones in irq_desc until we
* cleaned up the direct references and switched everything over to
* irq_data.
*/
struct irq_data {
u32 mask;
unsigned int irq;
unsigned long hwirq;
unsigned int node;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_data *parent_data;
#endif
void *handler_data;
void *chip_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
};
/*
@@ -190,6 +188,7 @@ struct irq_data {
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
* IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -204,6 +203,7 @@ enum {
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
#define __irqd_to_state(d) ((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}
static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}
static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
/*
* Functions for chained handlers which can be enabled/disabled by the
@@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq)
* Built-in IRQ handlers for various IRQ types,
* callable via desc->handle_irq()
*/
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
static inline void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->handler_data : NULL;
return d ? d->common->handler_data : NULL;
}
static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
return d->handler_data;
return d->common->handler_data;
}
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->msi_desc : NULL;
return d ? d->common->msi_desc : NULL;
}
static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
return d->msi_desc;
return d->common->msi_desc;
}
static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
return d->node;
#else
return 0;
#endif
}
static inline int irq_data_get_node(struct irq_data *d)
{
return d->node;
return irq_common_data_get_node(d->common);
}
static inline struct cpumask *irq_get_affinity_mask(int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->affinity : NULL;
return d ? d->common->affinity : NULL;
}
static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
return d->affinity;
return d->common->affinity;
}
unsigned int arch_dynirq_lower_bound(unsigned int from);


@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
return irq_to_desc(data->irq);
#else
return container_of(data, struct irq_desc, irq_data);
#endif
return container_of(data->common, struct irq_desc, irq_common_data);
}
static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
return desc->irq_data.handler_data;
return desc->irq_common_data.handler_data;
}
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
return desc->irq_data.msi_desc;
return desc->irq_common_data.msi_desc;
}
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
* handle an interrupt.
*/
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
desc->handle_irq(irq, desc);
desc->handle_irq(desc);
}
int generic_handle_irq(unsigned int irq);
@@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq)
return irq_desc_has_action(irq_to_desc(irq));
}
/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
irq_flow_handler_t handler)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
/* caller has locked the irq_desc and both params are valid */
static inline void
__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
irq_desc_get_irq_data(desc)->chip = chip;
desc->handle_irq = handler;
desc->name = name;
}
/**
* irq_set_handler_locked - Set irq handler from a locked region
* @data: Pointer to the irq_data structure which identifies the irq


@@ -8,7 +8,7 @@
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
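
Flow handlers written against the old prototype lose the explicit irq argument; one that still needs the number can recover it from the descriptor. A hedged sketch of a converted chained handler (the hardware demux is elided):

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static void example_demux_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        /* irq_desc_get_irq(desc) recovers the linux irq number if still needed */
        chained_irq_enter(chip, desc);
        /* ... read hardware status and generic_handle_irq() each child ... */
        chained_irq_exit(chip, desc);
}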


@@ -21,8 +21,8 @@
*
* DEFINE_STATIC_KEY_TRUE(key);
* DEFINE_STATIC_KEY_FALSE(key);
* static_key_likely()
* statick_key_unlikely()
* static_branch_likely()
* static_branch_unlikely()
*
* Jump labels provide an interface to generate dynamic branches using
* self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
* statement, setting the key to true requires us to patch in a jump
* to the out-of-line of true branch.
*
* In addtion to static_branch_{enable,disable}, we can also reference count
* In addition to static_branch_{enable,disable}, we can also reference count
* the key or branch direction via static_branch_{inc,dec}. Thus,
* static_branch_inc() can be thought of as a 'make more true' and
* static_branch_dec() as a 'make more false'. The inc()/dec()
* interface is meant to be used exclusively from the inc()/dec() for a given
* key.
* static_branch_dec() as a 'make more false'.
*
* Since this relies on modifying code, the branch modifying functions
* must be considered absolute slow paths (machine wide synchronization etc.).


@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n)
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
clear_bit(NAPI_STATE_NPSVC, &n->state);
}
#ifdef CONFIG_SMP


@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@ struct sk_buff;
* PHYs should register using this structure
*/
struct mii_bus {
struct module *owner;
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
@@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void)
return mdiobus_alloc_size(0);
}
int mdiobus_register(struct mii_bus *bus);
int __mdiobus_register(struct mii_bus *bus, struct module *owner);
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
struct phy_c45_device_ids *c45_ids);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_remove(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
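
Bus drivers keep calling mdiobus_register(); the macro now records THIS_MODULE as the owner so the MDIO/PHY core can pin the providing module. A hedged registration sketch (names are illustrative):

#include <linux/phy.h>
#include <linux/platform_device.h>

static int example_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus;

        bus = devm_mdiobus_alloc(&pdev->dev);
        if (!bus)
                return -ENOMEM;

        bus->name = "example-mdio";
        bus->parent = &pdev->dev;
        /* bus->id, bus->read, bus->write, ... filled in here */

        return mdiobus_register(bus);   /* expands to __mdiobus_register(bus, THIS_MODULE) */
}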


@@ -762,6 +762,18 @@ struct signal_struct {
unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
/*
* group_rwsem prevents new tasks from entering the threadgroup and
* member tasks from exiting, more specifically, setting of
* PF_EXITING. fork and exit paths are protected with this rwsem
* using threadgroup_change_begin/end(). Users which require
* threadgroup to remain stable should use threadgroup_[un]lock()
* which also takes care of exec path. Currently, cgroup is the
* only user.
*/
struct rw_semaphore group_rwsem;
#endif
oom_flags_t oom_flags;
short oom_score_adj; /* OOM kill score adjustment */


@@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg4,
unsigned long arg5)
{
return cap_task_prctl(option, arg2, arg3, arg3, arg5);
return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)


@@ -179,6 +179,9 @@ struct nf_bridge_info {
u8 bridged_dnat:1;
__u16 frag_max_size;
struct net_device *physindev;
/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
union {
/* prerouting: detect dnat in orig/reply direction */
__be32 ipv4_daddr;
@@ -189,9 +192,6 @@ struct nf_bridge_info {
* skb is out in neigh layer.
*/
char neigh_header[8];
/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
};
};
#endif
@@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) <= len)
skb->ip_summed = CHECKSUM_NONE;
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
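
The usual receive-path pattern is to pull a header and then fix up the checksum state, or to use skb_pull_rcsum() which combines the two; a hedged sketch:

#include <linux/skbuff.h>

static void example_strip_header(struct sk_buff *skb, unsigned int hlen)
{
        const void *start = skb->data;

        __skb_pull(skb, hlen);
        skb_postpull_rcsum(skb, start, hlen);   /* keeps COMPLETE/PARTIAL csum state honest */
}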


@@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
* @clock: lock protecting this structure
* @lock: lock protecting this structure
*
* @messages: number of spi-messages handled
* @transfers: number of spi_transfers handled


@@ -42,6 +42,7 @@ struct sock_xprt {
/*
* Connection of transports
*/
unsigned long sock_state;
struct delayed_work connect_worker;
struct sockaddr_storage srcaddr;
unsigned short srcport;
@@ -76,6 +77,8 @@ struct sock_xprt {
*/
#define TCP_RPC_REPLY (1UL << 6)
#define XPRT_SOCK_CONNECTING 1U
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */


@@ -360,7 +360,7 @@ static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
const struct thermal_zone_of_device_ops *ops)
{
return NULL;
return ERR_PTR(-ENODEV);
}
static inline
@@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
int power_actor_get_max_power(struct thermal_cooling_device *,
struct thermal_zone_device *tz, u32 *max_power);
int power_actor_get_min_power(struct thermal_cooling_device *,
struct thermal_zone_device *tz, u32 *min_power);
int power_actor_set_power(struct thermal_cooling_device *,
struct thermal_instance *, u32);
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
@@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz, u32 *max_power)
{ return 0; }
static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz,
u32 *min_power)
{ return -ENODEV; }
static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
struct thermal_instance *tz, u32 power)
{ return 0; }


@@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask);
}
static inline int housekeeping_any_cpu(void)
{
return cpumask_any_and(housekeeping_mask, cpu_online_mask);
}
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(void);
#else
static inline int housekeeping_any_cpu(void)
{
return smp_processor_id();
}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
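
Subsystems that need to run periodic work on some CPU can now keep it off nohz_full CPUs; a hedged sketch (the timer is illustrative):

#include <linux/tick.h>
#include <linux/timer.h>

static void example_arm_timer(struct timer_list *timer)
{
        /* prefer a housekeeping CPU so nohz_full CPUs stay undisturbed */
        add_timer_on(timer, housekeeping_any_cpu());
}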


@@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \