Merge tag 'v3.12' into core/locking to pick up mutex updates
Signed-off-by: Ingo Molnar <mingo@kernel.org>

@@ -158,6 +158,26 @@ static inline bool balloon_page_movable(struct page *page)
return false;
}

/*
 * isolated_balloon_page - identify an isolated balloon page on private
 * compaction/migration page lists.
 *
 * After a compaction thread isolates a balloon page for migration, it raises
 * the page refcount to prevent concurrent compaction threads from re-isolating
 * the same page. For that reason putback_movable_pages(), or other routines
 * that need to identify isolated balloon pages on private pagelists, cannot
 * rely on balloon_page_movable() to accomplish the task.
 */
static inline bool isolated_balloon_page(struct page *page)
{
/* Already isolated balloon pages, by default, have a raised refcount */
if (page_flags_cleared(page) && !page_mapped(page) &&
page_count(page) >= 2)
return __is_movable_balloon_page(page);

return false;
}

/*
 * balloon_page_insert - insert a page into the balloon's page list and make
 * the page->mapping assignment accordingly.
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
return false;
}

static inline bool isolated_balloon_page(struct page *page)
{
return false;
}

static inline bool balloon_page_isolate(struct page *page)
{
return false;

@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
extern void bcma_core_pci_up(struct bcma_bus *bus);
extern void bcma_core_pci_down(struct bcma_bus *bus);
extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);

extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);

@@ -65,6 +65,21 @@
#define __visible __attribute__((externally_visible))
#endif

/*
 * GCC 'asm goto' miscompiles certain code sequences:
 *
 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
 *
 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
 * Fixed in GCC 4.8.2 and later versions.
 *
 * (asm goto is automatically volatile - the naming reflects this.)
 */
#if GCC_VERSION <= 40801
# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#else
# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
#endif

#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400

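For reference, asm_volatile_goto() is the wrapper the kernel uses wherever it emits 'asm goto'. The stand-alone sketch below is only an illustration (it assumes GCC with asm goto support on x86-64 and is not kernel code); it shows the shape of the quirk, with the empty asm statement acting as the compiler barrier right after the asm goto.

#include <stdio.h>

/* Same shape as the workaround above: an empty asm follows every asm goto. */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

static int branch_taken(void)
{
	/* Unconditionally jump to the C label listed in the GotoLabels section. */
	asm_volatile_goto("jmp %l[taken]" : : : : taken);
	return 0;
taken:
	return 1;
}

int main(void)
{
	printf("branch_taken() = %d\n", branch_taken());	/* prints 1 */
	return 0;
}
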
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

@@ -6,6 +6,7 @@

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/workqueue.h>
#include <uapi/linux/filter.h>

#ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
{
atomic_t refcnt;
unsigned int len; /* Number of filter blocks */
struct rcu_head rcu;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct sock_filter *filter);
struct rcu_head rcu;
struct sock_filter insns[0];
union {
struct sock_filter insns[0];
struct work_struct work;
};
};

static inline unsigned int sk_filter_len(const struct sk_filter *fp)
static inline unsigned int sk_filter_size(unsigned int proglen)
{
return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
return max(sizeof(struct sk_filter),
offsetof(struct sk_filter, insns[proglen]));
}

extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
}
#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
#else
#include <linux/slab.h>
static inline void bpf_jit_compile(struct sk_filter *fp)
{
}
static inline void bpf_jit_free(struct sk_filter *fp)
{
kfree(fp);
}
#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
#endif

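The new sk_filter_size() rounds an allocation up so that either member of the union (the filter program or the deferred-free work item) fits. The user-space sketch below is an approximation with simplified stand-in types, not the kernel's definitions (it builds with GCC, which accepts the zero-length array); it only demonstrates the offsetof()-plus-max() sizing idea.

#include <stdio.h>
#include <stddef.h>

struct sock_filter { unsigned short code; unsigned char jt, jf; unsigned int k; };

struct sk_filter {
	unsigned int len;
	union {
		struct sock_filter insns[0];	/* the filter program */
		char work[64];			/* stand-in for struct work_struct */
	};
};

#define max(a, b) ((a) > (b) ? (a) : (b))

static size_t sk_filter_size(unsigned int proglen)
{
	/* Header plus program, but never smaller than the struct itself,
	 * so the union's work member always has room. */
	return max(sizeof(struct sk_filter),
		   offsetof(struct sk_filter, insns[proglen]));
}

int main(void)
{
	printf("0 insns  -> %zu bytes\n", sk_filter_size(0));
	printf("16 insns -> %zu bytes\n", sk_filter_size(16));
	return 0;
}
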
@@ -30,10 +30,13 @@
/*
 * Framework version for util services.
 */
#define UTIL_FW_MINOR 0

#define UTIL_WS2K8_FW_MAJOR 1
#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)

#define UTIL_FW_MAJOR 3
#define UTIL_FW_MINOR 0
#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)


/*

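The packed framework version is simply the major number in the high 16 bits and the minor in the low 16. A quick user-space check, reusing the values from the hunk above:

#include <stdio.h>

#define UTIL_FW_MAJOR	3
#define UTIL_FW_MINOR	0
#define UTIL_FW_VERSION	(UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)

int main(void)
{
	unsigned int v = UTIL_FW_VERSION;

	/* Unpack the two halves again. */
	printf("packed 0x%08x -> major %u, minor %u\n", v, v >> 16, v & 0xffff);
	return 0;
}
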
@@ -55,7 +55,7 @@
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */

#define OFFSET_STRIDE (9)

@@ -34,9 +34,9 @@ struct ipc_namespace {
int sem_ctls[4];
int used_sems;

int msg_ctlmax;
int msg_ctlmnb;
int msg_ctlmni;
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
atomic_t msg_bytes;
atomic_t msg_hdrs;
int auto_msgmni;

@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
return buf;
}

extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]

static inline char *hex_byte_pack_upper(char *buf, u8 byte)
{
*buf++ = hex_asc_upper_hi(byte);
*buf++ = hex_asc_upper_lo(byte);
return buf;
}

static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
{
return hex_byte_pack(buf, byte);

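A stand-alone version of the new upper-case helper is sketched below. The table literal matches what lib/hexdump.c exports as hex_asc_upper, but the program itself is only an illustration, not kernel code.

#include <stdio.h>

typedef unsigned char u8;

const char hex_asc_upper[] = "0123456789ABCDEF";
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]

static char *hex_byte_pack_upper(char *buf, u8 byte)
{
	*buf++ = hex_asc_upper_hi(byte);
	*buf++ = hex_asc_upper_lo(byte);
	return buf;
}

int main(void)
{
	u8 mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	char out[sizeof(mac) * 2 + 1], *p = out;

	for (unsigned int i = 0; i < sizeof(mac); i++)
		p = hex_byte_pack_upper(p, mac[i]);
	*p = '\0';
	printf("%s\n", out);	/* 001A2B3C4D5E */
	return 0;
}
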
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};

enum mem_cgroup_filter_t {
VISIT, /* visit current node */
SKIP, /* skip the current node and continue traversal */
SKIP_TREE, /* skip the whole subtree and continue traversal */
};

/*
 * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
 * iterate through the hierarchy tree. Each tree element is checked by the
 * predicate before it is returned by the iterator. If a filter returns
 * SKIP or SKIP_TREE then the iterator code continues traversal (with the
 * next node down the hierarchy or the next node that doesn't belong under the
 * memcg's subtree).
 */
typedef enum mem_cgroup_filter_t
(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
struct mem_cgroup *prev,
struct mem_cgroup_reclaim_cookie *reclaim,
mem_cgroup_iter_filter cond);

static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
struct mem_cgroup_reclaim_cookie *reclaim)
{
return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
}

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

/*
@@ -163,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);

/**
 * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
 * @new: true to enable, false to disable
 *
 * Toggle whether a failed memcg charge should invoke the OOM killer
 * or just return -ENOMEM. Returns the previous toggle state.
 *
 * NOTE: Any path that enables the OOM killer before charging must
 * call mem_cgroup_oom_synchronize() afterward to finalize the
 * OOM handling and clean up.
 */
static inline bool mem_cgroup_toggle_oom(bool new)
static inline void mem_cgroup_oom_enable(void)
{
bool old;

old = current->memcg_oom.may_oom;
current->memcg_oom.may_oom = new;

return old;
WARN_ON(current->memcg_oom.may_oom);
current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_enable_oom(void)
static inline void mem_cgroup_oom_disable(void)
{
bool old = mem_cgroup_toggle_oom(true);

WARN_ON(old == true);
}

static inline void mem_cgroup_disable_oom(void)
{
bool old = mem_cgroup_toggle_oom(false);

WARN_ON(old == false);
WARN_ON(!current->memcg_oom.may_oom);
current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
return p->memcg_oom.in_memcg_oom;
return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(void);
bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
@@ -260,9 +211,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
mem_cgroup_update_page_stat(page, idx, -1);
}

enum mem_cgroup_filter_t
mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
struct mem_cgroup *root);
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +327,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
static inline struct mem_cgroup *
mem_cgroup_iter_cond(struct mem_cgroup *root,
struct mem_cgroup *prev,
struct mem_cgroup_reclaim_cookie *reclaim,
mem_cgroup_iter_filter cond)
{
/* first call must return non-NULL, second return NULL */
return (struct mem_cgroup *)(unsigned long)!prev;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
@@ -437,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
{
}

static inline bool mem_cgroup_toggle_oom(bool new)
{
return false;
}

static inline void mem_cgroup_enable_oom(void)
static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_disable_oom(void)
static inline void mem_cgroup_oom_disable(void)
{
}

@@ -455,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
return false;
}

static inline bool mem_cgroup_oom_synchronize(void)
static inline bool mem_cgroup_oom_synchronize(bool wait)
{
return false;
}
@@ -471,11 +408,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
}

static inline
enum mem_cgroup_filter_t
mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
struct mem_cgroup *root)
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{
return VISIT;
return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)

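The mem_cgroup_oom_enable()/mem_cgroup_oom_disable() pair replaces the old toggle API with a strict contract: enabling must not nest and disabling must be balanced. The user-space mock below only restates that contract with stand-in types and an assert-based WARN_ON; it is not the kernel implementation, and the bracketing fault-path caller is an assumption based on the comments above.

#include <assert.h>
#include <stdio.h>

#define WARN_ON(cond)	assert(!(cond))	/* crude stand-in for the kernel macro */

/* Stand-in for current->memcg_oom */
static struct { unsigned int may_oom:1; } memcg_oom;

static void mem_cgroup_oom_enable(void)
{
	WARN_ON(memcg_oom.may_oom);	/* nested enable would be a bug */
	memcg_oom.may_oom = 1;
}

static void mem_cgroup_oom_disable(void)
{
	WARN_ON(!memcg_oom.may_oom);	/* unbalanced disable would be a bug */
	memcg_oom.may_oom = 0;
}

int main(void)
{
	/* A fault path brackets its charge attempts like this: */
	mem_cgroup_oom_enable();
	/* ... try to charge pages; a failure may now invoke the memcg OOM killer ... */
	mem_cgroup_oom_disable();

	printf("enable/disable stayed balanced\n");
	return 0;
}
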
@@ -45,6 +45,7 @@
#define MAPPER_CTRL_MINOR 236
#define LOOP_CTRL_MINOR 237
#define VHOST_NET_MINOR 238
#define UHID_MINOR 239
#define MISC_DYNAMIC_MINOR 255

struct device;

@@ -181,7 +181,7 @@ enum {
MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
};

enum {
@@ -417,7 +417,7 @@ struct mlx5_init_seg {
struct health_buffer health;
__be32 rsvd2[884];
__be32 health_counter;
__be32 rsvd3[1023];
__be32 rsvd3[1019];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;

@@ -82,7 +82,7 @@ enum {
};

enum {
MLX5_MAX_EQ_NAME = 20
MLX5_MAX_EQ_NAME = 32
};

enum {
@@ -747,8 +747,7 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)

enum {
MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
@@ -758,7 +757,6 @@ enum {
struct mlx5_profile {
u64 mask;
u32 log_max_qp;
int cmdif_csum;
struct {
int size;
int limit;

@@ -15,8 +15,8 @@
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>

#include <linux/atomic.h>
#include <asm/processor.h>

/*
 * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);

extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax() cpu_relax()
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

#endif

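With the Kconfig symbol gone, an architecture overrides the relax hook simply by defining arch_mutex_cpu_relax() in its own headers before this #ifndef is reached (historically s390 did so, because its cpu_relax() is comparatively expensive). A minimal user-space sketch of that override pattern, with printf() stand-ins for the real primitives:

#include <stdio.h>

/* Build with -DPRETEND_ARCH_OVERRIDE to simulate an architecture header
 * that supplies its own hook before the generic fallback is seen. */
#ifdef PRETEND_ARCH_OVERRIDE
# define arch_mutex_cpu_relax()	printf("arch-specific relax\n")
#endif

#define cpu_relax()		printf("generic cpu_relax\n")

/* Generic fallback, exactly the pattern the hunk above switches to. */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax()	cpu_relax()
#endif

int main(void)
{
	arch_mutex_cpu_relax();
	return 0;
}
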
@@ -2264,11 +2264,12 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
}

#ifdef CONFIG_XPS
extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
extern int netif_set_xps_queue(struct net_device *dev,
const struct cpumask *mask,
u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
struct cpumask *mask,
const struct cpumask *mask,
u16 index)
{
return 0;

@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
struct inode * (*open_context) (struct inode *dir,
struct nfs_open_context *ctx,
int open_flags,
struct iattr *iattr);
struct iattr *iattr,
int *);
int (*have_delegation)(struct inode *, fmode_t);
int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);

@@ -1,8 +1,6 @@
#ifndef __OF_IRQ_H
#define __OF_IRQ_H

#if defined(CONFIG_OF)
struct of_irq;
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
#include <linux/ioport.h>
#include <linux/of.h>

/*
 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
 * implements it differently. However, the prototype is the same for all,
 * so declare it here regardless of the CONFIG_OF_IRQ setting.
 */
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);

#if defined(CONFIG_OF_IRQ)
/**
 * of_irq - container for device_node/irq_specifier pair for an irq controller
 * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
extern int of_irq_count(struct device_node *dev);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);

extern void of_irq_init(const struct of_device_id *matches);

#endif /* CONFIG_OF_IRQ */
#if defined(CONFIG_OF)
/*
 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
 * implements it differently. However, the prototype is the same for all,
 * so declare it here regardless of the CONFIG_OF_IRQ setting.
 */
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
extern struct device_node *of_irq_find_parent(struct device_node *child);

#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,

@@ -1,14 +0,0 @@
#ifndef __OF_RESERVED_MEM_H
#define __OF_RESERVED_MEM_H

#ifdef CONFIG_OF_RESERVED_MEM
void of_reserved_mem_device_init(struct device *dev);
void of_reserved_mem_device_release(struct device *dev);
void early_init_dt_scan_reserved_mem(void);
#else
static inline void of_reserved_mem_device_init(struct device *dev) { }
static inline void of_reserved_mem_device_release(struct device *dev) { }
static inline void early_init_dt_scan_reserved_mem(void) { }
#endif

#endif /* __OF_RESERVED_MEM_H */

@@ -332,7 +332,7 @@ do { \
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef this_cpu_inc
@@ -418,7 +418,7 @@ do { \
# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)

@@ -586,7 +586,7 @@ do { \
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef __this_cpu_inc
@@ -668,7 +668,7 @@ do { \
__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)

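The added (typeof(pcp)) casts matter when a caller passes an unsigned value: negating it in the value's own type yields a huge positive number, which is then zero-extended into a wider per-cpu variable instead of subtracting. The user-space demonstration below uses plain variables rather than per-cpu ones and assumes an LP64 target; it only shows the promotion pitfall the casts close.

#include <stdio.h>

#define buggy_sub(dst, val)	((dst) += -(val))
#define fixed_sub(dst, val)	((dst) += -(typeof(dst))(val))

int main(void)
{
	unsigned int delta = 1;		/* callers often pass unsigned deltas */
	long a = 10, b = 10;

	buggy_sub(a, delta);	/* -(delta) is 0xffffffff, zero-extended to long */
	fixed_sub(b, delta);	/* cast first, so the negation happens as a long */

	printf("buggy: %ld, fixed: %ld\n", a, b);	/* buggy: 4294967305, fixed: 9 */
	return 0;
}
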
@@ -294,9 +294,31 @@ struct ring_buffer;
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
struct list_head group_entry;
/*
 * entry onto perf_event_context::event_list;
 * modifications require ctx->lock
 * RCU safe iterations.
 */
struct list_head event_entry;

/*
 * XXX: group_entry and sibling_list should be mutually exclusive;
 * either you're a sibling on a group, or you're the group leader.
 * Rework the code to always use the same list element.
 *
 * Locked for modification by both ctx->mutex and ctx->lock; holding
 * either sufficies for read.
 */
struct list_head group_entry;
struct list_head sibling_list;

/*
 * We need storage to track the entries in perf_pmu_migrate_context; we
 * cannot use the event_entry because of RCU and we want to keep the
 * group in tact which avoids us using the other two entries.
 */
struct list_head migrate_entry;

struct hlist_node hlist_entry;
int nr_siblings;
int group_flags;

@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);

#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;

@@ -40,6 +40,8 @@ enum regulator_status {
};

/**
 * struct regulator_linear_range - specify linear voltage ranges
 *
 * Specify a range of voltages for regulator_map_linar_range() and
 * regulator_list_linear_range().
 *

@@ -1394,11 +1394,10 @@ struct task_struct {
} memcg_batch;
unsigned int memcg_kmem_skip_account;
struct memcg_oom_info {
struct mem_cgroup *memcg;
gfp_t gfp_mask;
int order;
unsigned int may_oom:1;
unsigned int in_memcg_oom:1;
unsigned int oom_locked:1;
int wakeups;
struct mem_cgroup *wait_on_memcg;
} memcg_oom;
#endif
#ifdef CONFIG_UPROBES

@@ -498,7 +498,7 @@ struct sk_buff {
 * headers if needed
 */
__u8 encapsulation:1;
/* 7/9 bit hole (depending on ndisc_nodetype presence) */
/* 6/8 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);

#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL

@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,

static inline void kick_all_cpus_sync(void) { }

static inline void __smp_call_function_single(int cpuid,
struct call_single_data *data, int wait)
{
on_each_cpu(data->func, data->info, wait);
}

#endif /* !SMP */

/*

@@ -1,19 +0,0 @@
#ifndef __LINUX_TC_DEF_H
#define __LINUX_TC_DEF_H

#include <linux/pkt_cls.h>

struct tc_defact {
tc_gen;
};

enum {
TCA_DEF_UNSPEC,
TCA_DEF_TM,
TCA_DEF_PARMS,
TCA_DEF_DATA,
__TCA_DEF_MAX
};
#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)

#endif

@@ -64,6 +64,20 @@

#include <asm/timex.h>

#ifndef random_get_entropy
/*
 * The random_get_entropy() function is used by the /dev/random driver
 * in order to extract entropy via the relative unpredictability of
 * when an interrupt takes places versus a high speed, fine-grained
 * timing source or cycle counter. Since it will be occurred on every
 * single interrupt, it must have a very low cost/overhead.
 *
 * By default we use get_cycles() for this purpose, but individual
 * architectures may override this in their asm/timex.h header file.
 */
#define random_get_entropy() get_cycles()
#endif

/*
 * SHIFT_PLL is used as a dampening factor to define how much we
 * adjust the frequency correction for a given offset in PLL mode.

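Because of the #ifndef, an architecture can provide its own random_get_entropy() in asm/timex.h and this fallback simply disappears. The user-space sketch below mimics the fallback shape with __rdtsc() standing in for get_cycles(); it is x86-only and for illustration, not kernel code.

#include <stdio.h>
#include <x86intrin.h>

#define get_cycles()	__rdtsc()	/* stand-in for the kernel's get_cycles() */

/* An arch header could have defined this already; otherwise fall back. */
#ifndef random_get_entropy
#define random_get_entropy()	get_cycles()
#endif

int main(void)
{
	printf("entropy sample: %llu\n",
	       (unsigned long long)random_get_entropy());
	return 0;
}
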
@@ -12,7 +12,7 @@ struct usb_phy_gen_xceiv_platform_data {
unsigned int needs_reset:1;
};

#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
/* sometimes transceivers are accessed only through e.g. ULPI */
extern void usb_nop_xceiv_register(void);
extern void usb_nop_xceiv_unregister(void);

@@ -42,6 +42,7 @@ struct usbnet {
struct usb_host_endpoint *status;
unsigned maxpacket;
struct timer_list delay;
const char *padding_pkt;

/* protocol/interface state */
struct net_device *net;

@@ -66,7 +66,9 @@
US_FLAG(INITIAL_READ10, 0x00100000) \
/* Initial READ(10) (and others) must be retried */ \
US_FLAG(WRITE_CACHE, 0x00200000) \
/* Write Cache status is not available */
/* Write Cache status is not available */ \
US_FLAG(NEEDS_CAP16, 0x00400000)
/* cannot handle READ_CAPACITY_10 */

#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };

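The hunk appends a new flag, which is why the preceding comment line now needs a trailing backslash: US_DO_ALL_FLAGS is an x-macro list, and without the continuation the list would end early. The stand-alone snippet below reproduces only the last three entries to show how redefining US_FLAG() expands the list into the US_FL_* enum.

#include <stdio.h>

#define US_DO_ALL_FLAGS \
	US_FLAG(INITIAL_READ10, 0x00100000) \
	/* Initial READ(10) (and others) must be retried */ \
	US_FLAG(WRITE_CACHE, 0x00200000) \
	/* Write Cache status is not available */ \
	US_FLAG(NEEDS_CAP16, 0x00400000)
	/* cannot handle READ_CAPACITY_10 */

#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };

int main(void)
{
	printf("US_FL_NEEDS_CAP16 = 0x%08x\n", US_FL_NEEDS_CAP16);
	return 0;
}
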
@@ -65,15 +65,8 @@ struct pci_dev;
 * out of the arbitration process (and can be safe to take
 * interrupts at any time.
 */
#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
#else
static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes)
{
}
#endif

/**
 * vga_get - acquire & locks VGA resources

@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {

struct yamdrv_ioctl_mcs {
int cmd;
int bitrate;
unsigned int bitrate;
unsigned char bits[YAM_FPGA_SIZE];
};