Merge branch 'sched/urgent' into sched/core, to merge fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *********************************************************************/
 
 /* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID	~0
-#define CPUFREQ_TABLE_END	~1
+#define CPUFREQ_ENTRY_INVALID	~0u
+#define CPUFREQ_TABLE_END	~1u
 /* Special Values of .flags field */
 #define CPUFREQ_BOOST_FREQ	(1 << 0)
 
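The .frequency field of struct cpufreq_frequency_table is an unsigned int, so the sentinel macros become unsigned to match and avoid signed/unsigned comparison surprises. A minimal sketch of the usual table walk that relies on these sentinels (illustrative helper, not part of this change; the table contents are assumed):

/*
 * Illustrative only: find the lowest valid frequency in a driver table,
 * skipping CPUFREQ_ENTRY_INVALID holes and stopping at CPUFREQ_TABLE_END.
 */
static unsigned int lowest_valid_freq(const struct cpufreq_frequency_table *table)
{
	unsigned int i, lowest = CPUFREQ_ENTRY_INVALID;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;	/* hole in the table */
		if (lowest == CPUFREQ_ENTRY_INVALID ||
		    table[i].frequency < lowest)
			lowest = table[i].frequency;
	}
	return lowest;
}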
@@ -593,6 +593,7 @@ struct ata_host {
 	struct device 		*dev;
 	void __iomem * const	*iomap;
 	unsigned int		n_ports;
+	unsigned int		n_tags;			/* nr of NCQ tags */
 	void			*private_data;
 	struct ata_port_operations *ops;
 	unsigned long		flags;
@@ -578,8 +578,6 @@ struct mlx4_cq {
 	u32			cons_index;
 
-	u16			irq;
-	bool			irq_affinity_change;
 
 	__be32		       *set_ci_db;
 	__be32		       *arm_db;
 	int			arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 		   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
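mlx4_eq_get_irq() lets a consumer translate an assigned EQ vector back into its Linux IRQ number, for example to publish an affinity hint from the driver. A hedged sketch of that usage (the helper and its parameters are made up, not part of this patch):

/*
 * Sketch only: map an assigned EQ vector back to its IRQ and publish an
 * affinity hint for it. 'mdev', 'vector' and 'mask' are assumed
 * driver-local names.
 */
static void example_set_eq_affinity_hint(struct mlx4_dev *mdev, int vector,
					 const struct cpumask *mask)
{
	int irq = mlx4_eq_get_irq(mdev, vector);

	if (irq >= 0)
		irq_set_affinity_hint(irq, mask);
}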
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_queue	*osq;	/* Spinner MCS lock */
+	struct optimistic_spin_queue	osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
-				   struct phy_device *phydev);
-
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
 	return NULL;
 }
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
-					  struct phy_device *phydev)
-{
-}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
include/linux/osq_lock.h (new file, 27 lines)
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+	/*
+	 * Stores an encoded value of the CPU # of the tail node in the queue.
+	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+	 */
+	atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
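Because the queue is now just an atomic_t holding an encoded tail CPU number, locks can embed it by value instead of carrying a pointer, which is exactly what the mutex and rwsem hunks in this merge do. A minimal sketch of the two initialization paths this header offers, using a made-up embedding structure (struct example_lock is illustrative, not from the patch):

/*
 * Sketch only: a sleeping lock embedding the per-lock OSQ by value.
 * Static initialization uses OSQ_LOCK_UNLOCKED; runtime initialization
 * uses osq_lock_init().
 */
struct example_lock {
	atomic_t			count;
	struct optimistic_spin_queue	osq;	/* spinner MCS lock */
};

#define EXAMPLE_LOCK_INIT(name) \
	{ .count = ATOMIC_INIT(1), .osq = OSQ_LOCK_UNLOCKED }

static inline void example_lock_init(struct example_lock *lock)
{
	atomic_set(&lock->count, 1);
	osq_lock_init(&lock->osq);	/* runtime equivalent of OSQ_LOCK_UNLOCKED */
}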
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+	if (unlikely(PageHeadHuge(page)))
+		return page->index << compound_order(page);
+	else
+		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
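page_to_pgoff() normalizes ->index to PAGE_SIZE units: hugetlb head pages keep their index in huge-page units, so it is scaled by compound_order(), while regular page-cache pages already use base-page units (and PAGE_CACHE_SHIFT equals PAGE_SHIFT, so that shift is zero). As a worked example with 4 KiB base pages, a 2 MiB hugetlb head page (compound_order = 9) at index 3 yields pgoff 3 << 9 = 1536, i.e. a byte offset of 1536 * 4096 = 6 MiB.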
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -299,41 +298,6 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count.  We might even clobber some other CPU's attempt
- * to zero its counter.  This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-	return raw_cpu_inc_return(rcu_cond_resched_count) >=
-	       RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiscent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-	if (unlikely(rcu_should_resched()))
-		rcu_resched();
-}
-
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
@@ -15,13 +15,13 @@
 #ifdef __KERNEL__
 /*
  * the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
  * - if wait_list is not empty, then there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-	__s32			activity;
+	__s32			count;
 	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -13,10 +13,11 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
 #include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
 
-struct optimistic_spin_queue;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
 	long count;
-	raw_spinlock_t wait_lock;
 	struct list_head wait_list;
-#ifdef CONFIG_SMP
+	raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 	/*
 	 * Write owner. Used as a speculative check to see
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
-	struct optimistic_spin_queue *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name)			\
-	{ RWSEM_UNLOCKED_VALUE,				\
-	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
-	  LIST_HEAD_INIT((name).wait_list),		\
-	  NULL, /* owner */				\
-	  NULL /* mcs lock */				\
-	  __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
 #else
-#define __RWSEM_INITIALIZER(name)			\
-	{ RWSEM_UNLOCKED_VALUE,				\
-	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
-	  LIST_HEAD_INIT((name).wait_list)		\
-	  __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name)				\
+	{ .count = RWSEM_UNLOCKED_VALUE,			\
+	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
+	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)\
+	  __RWSEM_OPT_INIT(name)				\
+	  __RWSEM_DEP_MAP_INIT(name) }
+
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
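__RWSEM_OPT_INIT() folds the optional spin-on-owner fields into a single designated-initializer __RWSEM_INITIALIZER(), so the two duplicated initializer variants collapse into one. Roughly, DECLARE_RWSEM() then expands as follows; this is an illustrative expansion (CONFIG_RWSEM_SPIN_ON_OWNER=y, CONFIG_DEBUG_LOCK_ALLOC=n), not text from the patch:

/* Illustrative expansion of DECLARE_RWSEM(foo). */
struct rw_semaphore foo = {
	.count		= RWSEM_UNLOCKED_VALUE,
	.wait_list	= LIST_HEAD_INIT(foo.wait_list),
	.wait_lock	= __RAW_SPIN_LOCK_UNLOCKED(foo.wait_lock),
	.osq		= OSQ_LOCK_UNLOCKED,	/* from __RWSEM_OPT_INIT */
	.owner		= NULL,
};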
@@ -872,21 +872,21 @@ enum cpu_idle_type {
 #define SD_NUMA			0x4000	/* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
 	return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
 	return SD_NUMA;
 }
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP		0x01
 
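A const qualifier on a by-value return type has no effect and only triggers compiler warnings about ignored type qualifiers, hence the switch to plain int for the flags callbacks and the matching typedef. A sketch of how such a flags callback is consumed, paired with a mask callback in a topology-table entry (the struct and table shown here are illustrative, adapted from the default topology idea, and are not part of this diff):

/*
 * Sketch only: a per-level mask callback paired with a flags callback.
 * Field names mirror the kernel's topology-level structure, but
 * example_topology_level itself is made up for illustration.
 */
struct example_topology_level {
	sched_domain_mask_f	mask;		/* CPUs covered at this level */
	sched_domain_flags_f	sd_flags;	/* e.g. cpu_smt_flags */
};

static struct example_topology_level example_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags },
#endif
	{ NULL, },
};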