Merge branch 'sched/urgent' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
#define bio_set_dev(bio, bdev) \
do { \
if ((bio)->bi_disk != (bdev)->bd_disk) \
bio_clear_flag(bio, BIO_THROTTLED);\
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
} while (0)

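For illustration only (not part of the diff): a minimal sketch of how a stacking driver might use the updated bio_set_dev(). Pointing a bio at a different disk now also clears BIO_THROTTLED, so the bio is charged again by the throttling code on the new queue. The function and variable names below are illustrative assumptions.

    /* Illustrative only: remap a bio to a lower device and resubmit it. */
    static void example_remap_bio(struct bio *bio, struct block_device *lower_bdev)
    {
            bio_set_dev(bio, lower_bdev);   /* clears BIO_THROTTLED if bi_disk changes */
            generic_make_request(bio);      /* resubmit to the lower device's queue */
    }
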
@@ -50,8 +50,6 @@ struct blk_issue_stat {
struct bio {
struct bio *bi_next; /* request queue link */
struct gendisk *bi_disk;
u8 bi_partno;
blk_status_t bi_status;
unsigned int bi_opf; /* bottom bits req flags,
* top bits REQ_OP. Use
* accessors.

@@ -59,8 +57,8 @@ struct bio {
unsigned short bi_flags; /* status, etc and bvec pool number */
unsigned short bi_ioprio;
unsigned short bi_write_hint;

struct bvec_iter bi_iter;
blk_status_t bi_status;
u8 bi_partno;

/* Number of segments in this BIO after
* physical address coalescing is performed.

@@ -74,8 +72,9 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;

atomic_t __bi_remaining;
struct bvec_iter bi_iter;

atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;

void *bi_private;

@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
struct request {
struct list_head queuelist;
union {
call_single_data_t csd;
struct __call_single_data csd;
u64 fifo_time;
};

@@ -241,14 +241,24 @@ struct request {
struct request *next_rq;
};

static inline bool blk_op_is_scsi(unsigned int op)
{
return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)

@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
unsigned op = bio_op(bio);

return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;

@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);

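For illustration only (not part of the diff): a minimal sketch of how a driver might use the new helpers; the wrapper name is an assumption.

    /* Illustrative only: passthrough requests/bios carry SCSI or driver-private
     * opcodes and bypass normal fs-request handling. */
    static bool example_skip_fs_accounting(struct request *rq, struct bio *bio)
    {
            return blk_rq_is_passthrough(rq) || bio_is_passthrough(bio);
    }
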
@@ -15,11 +15,11 @@
* In practice this is far bigger than any realistic pointer offset; this limit
* ensures that umax_value + (int)off + (int)size cannot overflow a u64.
*/
#define BPF_MAX_VAR_OFF (1ULL << 31)
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ INT_MAX
#define BPF_MAX_VAR_SIZ (1 << 29)

/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that

@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
* READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
* particular ordering. One way to make the compiler aware of ordering is to
* put the two invocations of READ_ONCE or WRITE_ONCE in different C
* statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
* least two memcpy()s: one for the __builtin_memcpy() and then one for
* the macro doing the copy of variable - '__u' allocated on the stack.
* These two macros will also work on aggregate data types like structs or
* unions. If the size of the accessed data type exceeds the word size of
* the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
* fall back to memcpy(). There's at least two memcpy()s: one for the
* __builtin_memcpy() and then one for the macro doing the copy of variable
* - '__u' allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.

@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")

/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
* but only when the compiler is aware of some particular ordering. One way
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
* on a union member will work as long as the size of the member matches the
* size of the union and the size is smaller than word size.
*
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
* between process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
* If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

#endif /* __LINUX_COMPILER_H */

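For illustration only (not part of the diff): with ACCESS_ONCE() gone, code that shares a simple flag between an interrupt handler and process context uses READ_ONCE()/WRITE_ONCE(), as the comment above describes. The names below are illustrative.

    static int example_flag;

    static irqreturn_t example_irq_handler(int irq, void *dev_id)
    {
            WRITE_ONCE(example_flag, 1);            /* single, non-torn store */
            return IRQ_HANDLED;
    }

    static void example_wait_for_flag(void)
    {
            while (!READ_ONCE(example_flag))        /* refetched on every iteration */
                    cpu_relax();
    }
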
@@ -10,9 +10,6 @@
*/

#include <linux/wait.h>
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#include <linux/lockdep.h>
#endif

/*
* struct completion - structure used to maintain state for a "completion"

@@ -29,58 +26,16 @@
struct completion {
unsigned int done;
wait_queue_head_t wait;
#ifdef CONFIG_LOCKDEP_COMPLETIONS
struct lockdep_map_cross map;
#endif
};

#ifdef CONFIG_LOCKDEP_COMPLETIONS
static inline void complete_acquire(struct completion *x)
{
lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
}

static inline void complete_release(struct completion *x)
{
lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
}

static inline void complete_release_commit(struct completion *x)
{
lock_commit_crosslock((struct lockdep_map *)&x->map);
}

#define init_completion_map(x, m) \
do { \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
(m)->name, (m)->key, 0); \
__init_completion(x); \
} while (0)

#define init_completion(x) \
do { \
static struct lock_class_key __key; \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
"(completion)" #x, \
&__key, 0); \
__init_completion(x); \
} while (0)
#else
#define init_completion_map(x, m) __init_completion(x)
#define init_completion(x) __init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
static inline void complete_release_commit(struct completion *x) {}
#endif

#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#endif

#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); }))

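For illustration only (not part of the diff): the completion API itself is unchanged by the CONFIG_LOCKDEP_COMPLETIONS removal; a typical caller still looks like the sketch below, with illustrative names.

    static DECLARE_COMPLETION(example_done);

    static void example_worker(struct work_struct *work)
    {
            /* ... perform the deferred work ... */
            complete(&example_done);        /* wake up the waiter */
    }

    static void example_wait(void)
    {
            wait_for_completion(&example_done);
    }
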
@@ -86,7 +86,7 @@ enum cpuhp_state {
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,

@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
extern bool may_setgroups(void);
extern void groups_sort(struct group_info *);

/*
* The security context of a task

@@ -140,11 +140,13 @@ struct efi_boot_memmap {

struct capsule_info {
efi_capsule_header_t header;
efi_capsule_header_t *capsule;
int reset_type;
long index;
size_t count;
size_t total_size;
phys_addr_t *pages;
struct page **pages;
phys_addr_t *phys;
size_t page_bytes_remain;
};

@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
{
if (fscache_cookie_valid(cookie) && PageFsCache(page))
return __fscache_maybe_release_page(cookie, page, gfp);
return false;
return true;
}

/**

@@ -66,9 +66,10 @@ struct gpio_irq_chip {
/**
* @lock_key:
*
* Per GPIO IRQ chip lockdep class.
* Per GPIO IRQ chip lockdep classes.
*/
struct lock_class_key *lock_key;
struct lock_class_key *request_key;

/**
* @parent_handler:

@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,

/* add/remove chips */
extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct lock_class_key *lock_key);
struct lock_class_key *lock_key,
struct lock_class_key *request_key);

/**
* gpiochip_add_data() - register a gpio_chip

@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
*/
#ifdef CONFIG_LOCKDEP
#define gpiochip_add_data(chip, data) ({ \
static struct lock_class_key key; \
gpiochip_add_data_with_key(chip, data, &key); \
static struct lock_class_key lock_key; \
static struct lock_class_key request_key; \
gpiochip_add_data_with_key(chip, data, &lock_key, \
&request_key); \
})
#else
#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
#endif

static inline int gpiochip_add(struct gpio_chip *chip)

@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type,
bool threaded,
struct lock_class_key *lock_key);
struct lock_class_key *lock_key,
struct lock_class_key *request_key);

#ifdef CONFIG_LOCKDEP

@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type)
{
static struct lock_class_key key;
static struct lock_class_key lock_key;
static struct lock_class_key request_key;

return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, false, &key);
handler, type, false,
&lock_key, &request_key);
}

static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,

@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
unsigned int type)
{
static struct lock_class_key key;
static struct lock_class_key lock_key;
static struct lock_class_key request_key;

return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, true, &key);
handler, type, true,
&lock_key, &request_key);
}
#else
static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,

@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
unsigned int type)
{
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, false, NULL);
handler, type, false, NULL, NULL);
}

static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,

@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
unsigned int type)
{
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, true, NULL);
handler, type, true, NULL, NULL);
}
#endif /* CONFIG_LOCKDEP */

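For illustration only (not part of the diff): existing callers of gpiochip_add_data() do not change; under CONFIG_LOCKDEP the macro itself now supplies both static keys. The wrapper name below is illustrative.

    static int example_register_chip(struct gpio_chip *chip, void *priv)
    {
            /* expands to gpiochip_add_data_with_key(chip, priv, &lock_key,
             * &request_key) when CONFIG_LOCKDEP is enabled */
            return gpiochip_add_data(chip, priv);
    }
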
@@ -15,6 +15,7 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/bug.h>

struct idr {
struct radix_tree_root idr_rt;

include/linux/intel-pti.h (new file, 43 lines)
@@ -0,0 +1,43 @@
/*
* Copyright (C) Intel 2011
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
* (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
* compact JTAG, standard.
*
* This header file will allow other parts of the OS to use the
* interface to write out it's contents for debugging a mobile system.
*/

#ifndef LINUX_INTEL_PTI_H_
#define LINUX_INTEL_PTI_H_

/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
#define PTI_LASTDWORD_DTS 0x30

/* basic structure used as a write address to the PTI HW */
struct pti_masterchannel {
u8 master;
u8 channel;
};

/* the following functions are defined in misc/pti.c */
void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
struct pti_masterchannel *pti_request_masterchannel(u8 type,
const char *thread_name);
void pti_release_masterchannel(struct pti_masterchannel *mc);

#endif /* LINUX_INTEL_PTI_H_ */

@@ -273,7 +273,8 @@ struct ipv6_pinfo {
* 100: prefer care-of address
*/
dontfrag:1,
autoflowlabel:1;
autoflowlabel:1,
autoflowlabel_set:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;

@@ -212,6 +212,7 @@ struct irq_data {
* mask. Applies only to affinity managed irqs.
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
*/
enum {
IRQD_TRIGGER_MASK = 0xf,

@@ -233,6 +234,7 @@ enum {
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
};

#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_set_can_reserve(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_CAN_RESERVE;
}

static inline void irqd_clr_can_reserve(struct irq_data *d)
{
__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}

static inline bool irqd_can_reserve(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}

#undef __irqd_to_state

static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)

@@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
data->chip = chip;
}

static inline int irq_balancing_disabled(unsigned int irq)
static inline bool irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;

@@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}

static inline int irq_is_percpu(unsigned int irq)
static inline bool irq_is_percpu(unsigned int irq)
{
struct irq_desc *desc;

@@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)
return desc->status_use_accessors & IRQ_PER_CPU;
}

static inline int irq_is_percpu_devid(unsigned int irq)
static inline bool irq_is_percpu_devid(unsigned int irq)
{
struct irq_desc *desc;

@@ -255,12 +255,15 @@ static inline int irq_is_percpu_devid(unsigned int irq)
}

static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{
struct irq_desc *desc = irq_to_desc(irq);

if (desc)
lockdep_set_class(&desc->lock, class);
if (desc) {
lockdep_set_class(&desc->lock, lock_class);
lockdep_set_class(&desc->request_mutex, request_class);
}
}

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI

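For illustration only (not part of the diff): callers of irq_set_lockdep_class() now pass two keys, one for desc->lock and one for the request mutex. The key names below are illustrative.

    static struct lock_class_key example_lock_class;
    static struct lock_class_key example_request_class;

    static void example_setup_irq_lockdep(unsigned int irq)
    {
            irq_set_lockdep_class(irq, &example_lock_class, &example_request_class);
    }
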
@@ -113,7 +113,7 @@ struct irq_domain_ops {
unsigned int nr_irqs, void *arg);
void (*free)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs);
int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *out_hwirq, unsigned int *out_type);

@@ -1 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */

@@ -232,7 +232,7 @@ struct kvm_vcpu {
struct mutex mutex;
struct kvm_run *run;

int guest_fpu_loaded, guest_xcr0_loaded;
int guest_xcr0_loaded;
struct swait_queue_head wq;
struct pid __rcu *pid;
int sigset_active;

@@ -158,12 +158,6 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
/*
* Whether it's a crosslock.
*/
int cross;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,

@@ -267,95 +261,8 @@ struct held_lock {
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
/*
* Generation id.
*
* A value of cross_gen_id will be stored when holding this,
* which is globally increased whenever each crosslock is held.
*/
unsigned int gen_id;
#endif
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
* This is for keeping locks waiting for commit so that true dependencies
* can be added at commit step.
*/
struct hist_lock {
/*
* Id for each entry in the ring buffer. This is used to
* decide whether the ring buffer was overwritten or not.
*
* For example,
*
* |<----------- hist_lock ring buffer size ------->|
* pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
* wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
*
* where 'p' represents an acquisition in process
* context, 'i' represents an acquisition in irq
* context.
*
* In this example, the ring buffer was overwritten by
* acquisitions in irq context, that should be detected on
* rollback or commit.
*/
unsigned int hist_id;

/*
* Seperate stack_trace data. This will be used at commit step.
*/
struct stack_trace trace;
unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

/*
* Seperate hlock instance. This will be used at commit step.
*
* TODO: Use a smaller data structure containing only necessary
* data. However, we should make lockdep code able to handle the
* smaller one first.
*/
struct held_lock hlock;
};

/*
* To initialize a lock as crosslock, lockdep_init_map_crosslock() should
* be called instead of lockdep_init_map().
*/
struct cross_lock {
/*
* When more than one acquisition of crosslocks are overlapped,
* we have to perform commit for them based on cross_gen_id of
* the first acquisition, which allows us to add more true
* dependencies.
*
* Moreover, when no acquisition of a crosslock is in progress,
* we should not perform commit because the lock might not exist
* any more, which might cause incorrect memory access. So we
* have to track the number of acquisitions of a crosslock.
*/
int nr_acquire;

/*
* Seperate hlock instance. This will be used at commit step.
*
* TODO: Use a smaller data structure containing only necessary
* data. However, we should make lockdep code able to handle the
* smaller one first.
*/
struct held_lock hlock;
};

struct lockdep_map_cross {
struct lockdep_map map;
struct cross_lock xlock;
};
#endif

/*
* Initialization, self-test and debugging-output methods:
*/

@@ -560,37 +467,6 @@ enum xhlock_context_t {
XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
const char *name,
struct lock_class_key *key,
int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

/*
* What we essencially have to initialize is 'nr_acquire'. Other members
* will be initialized in add_xlock().
*/
#define STATIC_CROSS_LOCK_INIT() \
{ .nr_acquire = 0,}

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
{ .map.name = (_name), .map.key = (void *)(_key), \
.map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }

/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.

@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */

#ifdef CONFIG_LOCK_STAT

@@ -915,10 +915,10 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)

enum dev_aspm_mode {
DEV_ASPM_DISABLE = 0,
DEV_ASPM_DYNAMIC,
DEV_ASPM_BACKDOOR,
DEV_ASPM_STATIC,
DEV_ASPM_DISABLE,
};

/*

@@ -556,6 +556,7 @@ struct mlx5_core_sriov {
};

struct mlx5_irq_info {
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};

@@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

@@ -1164,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);

@@ -147,7 +147,7 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,

@@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 vxlan_udp_port[0x10];
};

struct mlx5_ifc_set_rate_limit_out_bits {
struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];

@@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};

struct mlx5_ifc_set_rate_limit_in_bits {
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];

@@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
u8 reserved_at_60[0x20];

u8 rate_limit[0x20];

u8 reserved_at_a0[0x160];
};

struct mlx5_ifc_access_register_out_bits {

@@ -66,6 +66,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
return tsk->signal->oom_mm;
}

/*
* Use this helper if tsk->mm != mm and the victim mm needs a special
* handling. This is guaranteed to stay true after once set.
*/
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
return test_bit(MMF_OOM_VICTIM, &mm->flags);
}

/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the

@@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
unsigned int devfn)
{ return NULL; }
static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
unsigned int bus, unsigned int devfn)
{ return NULL; }

static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }

@@ -15,6 +15,7 @@
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
* Kernel-internal data types and definitions:

@@ -787,7 +788,7 @@ struct perf_output_handle {
};

struct bpf_perf_event_data_kern {
struct pt_regs *regs;
bpf_user_pt_regs_t *regs;
struct perf_sample_data *data;
struct perf_event *event;
};

@@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{

@@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

extern void dev_pm_skip_next_resume_phases(struct device *dev);
extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

@@ -1,43 +1,11 @@
/*
* Copyright (C) Intel 2011
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
* (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
* compact JTAG, standard.
*
* This header file will allow other parts of the OS to use the
* interface to write out it's contents for debugging a mobile system.
*/
// SPDX-License-Identifier: GPL-2.0
#ifndef _INCLUDE_PTI_H
#define _INCLUDE_PTI_H

#ifndef PTI_H_
#define PTI_H_
#ifdef CONFIG_PAGE_TABLE_ISOLATION
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
#endif

/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
#define PTI_LASTDWORD_DTS 0x30

/* basic structure used as a write address to the PTI HW */
struct pti_masterchannel {
u8 master;
u8 channel;
};

/* the following functions are defined in misc/pti.c */
void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
struct pti_masterchannel *pti_request_masterchannel(u8 type,
const char *thread_name);
void pti_release_masterchannel(struct pti_masterchannel *mc);

#endif /*PTI_H_*/
#endif

@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)

/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must hold producer_lock.
* Callers are responsible for making sure pointer that is being queued
* points to a valid data.
*/
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
if (unlikely(!r->size) || r->queue[r->producer])
return -ENOSPC;

/* Make sure the pointer we are storing points to a valid data. */
/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
smp_wmb();

r->queue[r->producer++] = ptr;
if (unlikely(r->producer >= r->size))
r->producer = 0;

@@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
if (ptr)
__ptr_ring_discard_one(r);

/* Make sure anyone accessing data through the pointer is up to date. */
/* Pairs with smp_wmb in __ptr_ring_produce. */
smp_read_barrier_depends();
return ptr;
}

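For illustration only (not part of the diff): the smp_wmb()/smp_read_barrier_depends() pairing added above is what lets a consumer see fully initialised objects; a typical user goes through the locked wrappers, sketched here with illustrative names.

    static struct ptr_ring example_ring;

    static int example_ring_setup(void)
    {
            return ptr_ring_init(&example_ring, 64, GFP_KERNEL);    /* 64 slots */
    }

    static int example_ring_send(void *item)
    {
            return ptr_ring_produce(&example_ring, item);   /* -ENOSPC when full */
    }

    static void *example_ring_recv(void)
    {
            return ptr_ring_consume(&example_ring);         /* NULL when empty */
    }
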
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
struct rb_root_cached *root);

static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)

@@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
first->pprev = &n->next;
}

/**
* hlist_nulls_add_tail_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the end of the specified hlist_nulls,
* while permitting racing traversals. NOTE: tail insertion requires
* list traversal.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *i, *last = NULL;

for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
i = hlist_nulls_next_rcu(i))
last = i;

if (last) {
n->next = last->next;
n->pprev = &last->next;
rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
}
}

/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.

@@ -10,9 +10,6 @@
*/
typedef struct {
arch_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;

@@ -849,17 +849,6 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
struct hist_lock *xhlocks; /* Crossrelease history locks */
unsigned int xhlock_idx;
/* For restoring at history boundaries */
unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
unsigned int hist_id;
/* For overwrite check at each context exit */
unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
unsigned int in_ubsan;
#endif

@@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
__set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);
extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({ \
BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
__get_task_comm(buf, sizeof(buf), tsk); \
})

#ifdef CONFIG_SMP
void scheduler_ipi(void);

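For illustration only (not part of the diff): under the new get_task_comm() the destination must be a real char array of TASK_COMM_LEN bytes, since the macro checks sizeof(buf) at build time. Illustrative caller below.

    static void example_log_comm(struct task_struct *tsk)
    {
            char comm[TASK_COMM_LEN];       /* a pointer would trip the BUILD_BUG_ON */

            get_task_comm(comm, tsk);
            pr_info("task comm: %s\n", comm);
    }
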
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)

#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\

@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
* @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
* when not using a GPIO line)
* not using a GPIO line)
*
* @statistics: statistics for the spi_device
*

@@ -107,16 +107,11 @@ do { \

#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock) (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
* This barrier must provide two things:

@@ -19,9 +19,6 @@

typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;

@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
{
__kernel_size_t ret;
size_t p_size = __builtin_object_size(p, 0);
if (p_size == (size_t)-1)

/* Work around gcc excess stack consumption issue */
if (p_size == (size_t)-1 ||
(__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
return __builtin_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)

@@ -117,6 +117,12 @@ struct attribute_group {
.show = _name##_show, \
}

#define __ATTR_RO_MODE(_name, _mode) { \
.attr = { .name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
.show = _name##_show, \
}

#define __ATTR_WO(_name) { \
.attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
.store = _name##_store, \

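For illustration only (not part of the diff): __ATTR_RO_MODE() defines a read-only attribute whose mode differs from the usual S_IRUGO default; the attribute name and 0440 mode below are illustrative.

    static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
                                char *buf)
    {
            return sprintf(buf, "%d\n", 42);
    }
    static struct kobj_attribute example_attr = __ATTR_RO_MODE(example, 0440);
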
@@ -224,7 +224,8 @@ struct tcp_sock {
rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
unused:3;
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
unused:2;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
unused1 : 1,

@@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */

@@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);

#ifdef CONFIG_HOTPLUG_CPU
int timers_prepare_cpu(unsigned int cpu);
int timers_dead_cpu(unsigned int cpu);
#else
#define timers_dead_cpu NULL
#define timers_prepare_cpu NULL
#define timers_dead_cpu NULL
#endif

#endif

@@ -18,7 +18,7 @@
*/
struct trace_export {
struct trace_export __rcu *next;
void (*write)(const void *, unsigned int);
void (*write)(struct trace_export *, const void *, unsigned int);
};

int register_ftrace_export(struct trace_export *export);

@@ -82,6 +82,7 @@ struct usbnet {
# define EVENT_RX_KILL 10
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
};

static inline struct usb_driver *driver_of(struct usb_interface *intf)