Merge branch 'x86/hyperv' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Topic branch for stable KVM clocksource under Hyper-V.

Thanks to Christoffer Dall for resolving the ARM conflict.
Committed by Radim Krčmář on 2018-02-01 15:04:17 +01:00
2721 changed files with 90777 additions and 47548 deletions


@@ -451,6 +451,7 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
@@ -640,6 +641,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
static inline const char *
acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
{
return NULL;
}
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;


@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
static inline
unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
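
For illustration, a hedged sketch of a consumer of the new single-argument topology_get_freq_scale(); the helper name and the SCHED_CAPACITY_SHIFT scaling convention are assumptions, not part of this hunk:

static unsigned long example_freq_scaled_util(unsigned long util, int cpu)
{
	/* freq_scale is normalised to SCHED_CAPACITY_SCALE (1024) */
	return (util * topology_get_freq_scale(cpu)) >> SCHED_CAPACITY_SHIFT;
}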


@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
* holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
* associated wb's list_lock.
*/
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&


@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bv->bv_len = iter.bi_bvec_done;
}
static inline unsigned bio_pages_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
return bio->bi_vcnt;
}
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
return bio->bi_io_vec;
}
static inline struct page *bio_first_page_all(struct bio *bio)
{
return bio_first_bvec_all(bio)->bv_page;
}
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
#endif
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
@@ -492,6 +514,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
#define bio_set_dev(bio, bdev) \
do { \
if ((bio)->bi_disk != (bdev)->bd_disk) \
bio_clear_flag(bio, BIO_THROTTLED);\
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
} while (0)
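
For illustration, a minimal sketch using the new *_all accessors on a caller-built (non-cloned) bio; the dump helper is hypothetical:

static void example_dump_bio(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	pr_debug("%u vecs, first page %p, last vec %u bytes\n",
		 bio_pages_all(bio), bio_first_page_all(bio), last->bv_len);
}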


@@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
struct blkg_rwstat *from)
{
struct blkg_rwstat v = blkg_rwstat_read(from);
u64 sum[BLKG_RWSTAT_NR];
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
atomic64_add(atomic64_read(&v.aux_cnt[i]) +
atomic64_read(&from->aux_cnt[i]),
sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
for (i = 0; i < BLKG_RWSTAT_NR; i++)
atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
&to->aux_cnt[i]);
}


@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx {
#endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
struct srcu_struct queue_rq_srcu[0];
struct srcu_struct srcu[0];
};
struct blk_mq_tag_set {


@@ -39,6 +39,34 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_AGAIN ((__force blk_status_t)12)
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
*
* Description:
* This classifies block error status into non-retryable errors and ones
* that may be successful if retried on a failover path.
*
* Return:
* %false - retrying failover path will not help
* %true - may succeed if retried
*/
static inline bool blk_path_error(blk_status_t error)
{
switch (error) {
case BLK_STS_NOTSUPP:
case BLK_STS_NOSPC:
case BLK_STS_TARGET:
case BLK_STS_NEXUS:
case BLK_STS_MEDIUM:
case BLK_STS_PROTECTION:
return false;
}
/* Anything else could be a path failure, so should be retried */
return true;
}
struct blk_issue_stat {
u64 stat;
};
@@ -50,8 +78,6 @@ struct blk_issue_stat {
struct bio {
struct bio *bi_next; /* request queue link */
struct gendisk *bi_disk;
u8 bi_partno;
blk_status_t bi_status;
unsigned int bi_opf; /* bottom bits req flags,
* top bits REQ_OP. Use
* accessors.
@@ -59,8 +85,8 @@ struct bio {
unsigned short bi_flags; /* status, etc and bvec pool number */
unsigned short bi_ioprio;
unsigned short bi_write_hint;
struct bvec_iter bi_iter;
blk_status_t bi_status;
u8 bi_partno;
/* Number of segments in this BIO after
* physical address coalescing is performed.
@@ -74,8 +100,9 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
atomic_t __bi_remaining;
struct bvec_iter bi_iter;
atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
void *bi_private;
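
For illustration, a hedged sketch of the intended use of blk_path_error() added earlier in this file: a multipath-aware completion handler retrying only path-related failures. The requeue helper is hypothetical; blk_mq_end_request() is the existing API:

static void example_complete_rq(struct request *rq, blk_status_t error)
{
	if (error && blk_path_error(error)) {
		/* may succeed if retried on another path */
		example_requeue_on_alternate_path(rq);	/* hypothetical */
		return;
	}
	blk_mq_end_request(rq, error);
}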


@@ -27,6 +27,8 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/seqlock.h>
#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
/* timeout is expired */
#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t;
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
struct list_head queuelist;
union {
call_single_data_t csd;
u64 fifo_time;
};
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
@@ -148,8 +150,6 @@ struct request {
int internal_tag;
unsigned long atomic_flags;
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
int tag;
@@ -158,6 +158,8 @@ struct request {
struct bio *bio;
struct bio *biotail;
struct list_head queuelist;
/*
* The hash is used inside the scheduler, and killed once the
* request reaches the dispatch list. The ipi_list is only used
@@ -205,19 +207,16 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
unsigned long long io_start_time_ns; /* when passed to hardware */
#endif
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
unsigned short nr_integrity_segments;
#endif
unsigned short write_hint;
unsigned short ioprio;
unsigned int timeout;
@@ -226,11 +225,37 @@ struct request {
unsigned int extra_len; /* length of alignment and padding */
unsigned short write_hint;
/*
* On blk-mq, the lower bits of ->gstate (generation number and
* state) carry the MQ_RQ_* state value and the upper bits the
* generation number which is monotonically incremented and used to
* distinguish the reuse instances.
*
* ->gstate_seq allows updates to ->gstate and other fields
* (currently ->deadline) during request start to be read
* atomically from the timeout path, so that it can operate on a
* coherent set of information.
*/
seqcount_t gstate_seq;
u64 gstate;
/*
* ->aborted_gstate is used by the timeout to claim a specific
* recycle instance of this request. See blk_mq_timeout_work().
*/
struct u64_stats_sync aborted_gstate_sync;
u64 aborted_gstate;
/* access through blk_rq_set_deadline, blk_rq_deadline */
unsigned long __deadline;
unsigned long deadline;
struct list_head timeout_list;
union {
struct __call_single_data csd;
u64 fifo_time;
};
/*
* completion callback.
*/
@@ -239,16 +264,32 @@ struct request {
/* for bidi */
struct request *next_rq;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
unsigned long long io_start_time_ns; /* when passed to hardware */
#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
{
return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}
static inline bool blk_op_is_private(unsigned int op)
{
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
static inline bool blk_rq_is_scsi(struct request *rq)
{
return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
return blk_op_is_scsi(req_op(rq));
}
static inline bool blk_rq_is_private(struct request *rq)
{
return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
return blk_op_is_private(req_op(rq));
}
static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +297,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}
static inline bool bio_is_passthrough(struct bio *bio)
{
unsigned op = bio_op(bio);
return blk_op_is_scsi(op) || blk_op_is_private(op);
}
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -546,6 +594,22 @@ struct request_queue {
struct queue_limits limits;
/*
* Zoned block device information for request dispatch control.
* nr_zones is the total number of zones of the device. This is always
* 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
* bits which indicates if a zone is conventional (bit clear) or
* sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
* bits which indicates if a zone is write locked, that is, if a write
* request targeting the zone was dispatched. All three fields are
* initialized by the low level device driver (e.g. scsi/sd.c).
* Stacking drivers (device mappers) may or may not initialize
* these fields.
*/
unsigned int nr_zones;
unsigned long *seq_zones_bitmap;
unsigned long *seq_zones_wlock;
/*
* sg stuff
*/
@@ -790,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
return q->nr_zones;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
if (!blk_queue_is_zoned(q))
return 0;
return sector >> ilog2(q->limits.chunk_sectors);
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
sector_t sector)
{
if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
static inline bool rq_is_sync(struct request *rq)
{
return op_is_sync(rq->cmd_flags);
@@ -948,7 +1033,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
@@ -1029,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}
static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
/*
* Some commands like WRITE SAME have a payload or data transfer size which
* is different from the size of the request. Any driver that supports such
@@ -1578,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
if (q)
return blk_queue_zone_sectors(q);
return 0;
}
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q)
return blk_queue_nr_zones(q);
return 0;
}
@@ -1714,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
@@ -1954,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
#ifdef CONFIG_BLK_DEV_ZONED
bool blk_req_needs_zone_write_lock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);
static inline void blk_req_zone_write_lock(struct request *rq)
{
if (blk_req_needs_zone_write_lock(rq))
__blk_req_zone_write_lock(rq);
}
static inline void blk_req_zone_write_unlock(struct request *rq)
{
if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
__blk_req_zone_write_unlock(rq);
}
static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
return rq->q->seq_zones_wlock &&
test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
}
static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
if (!blk_req_needs_zone_write_lock(rq))
return true;
return !blk_req_zone_is_write_locked(rq);
}
#else
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
return false;
}
static inline void blk_req_zone_write_lock(struct request *rq)
{
}
static inline void blk_req_zone_write_unlock(struct request *rq)
{
}
static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
return false;
}
static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */
#else /* CONFIG_BLOCK */
struct block_device;
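
For illustration, a minimal dispatch-side sketch of the zone write locking helpers declared above (function name hypothetical):

static bool example_prep_zoned_write(struct request *rq)
{
	if (!blk_req_can_dispatch_to_zone(rq))
		return false;		/* target zone already write-locked */

	blk_req_zone_write_lock(rq);	/* no-op for conventional zones */
	return true;
}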


@@ -43,7 +43,14 @@ struct bpf_map_ops {
};
struct bpf_map {
atomic_t refcnt;
/* 1st cacheline with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
*/
const struct bpf_map_ops *ops ____cacheline_aligned;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
void *security;
#endif
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
u32 pages;
u32 id;
int numa_node;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
bool unpriv_array;
/* 7 bytes hole */
/* 2nd cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
struct user_struct *user ____cacheline_aligned;
atomic_t refcnt;
atomic_t usercnt;
struct bpf_map *inner_map_meta;
struct work_struct work;
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
#endif
};
/* function argument constraints */
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
struct bpf_array {
struct bpf_map map;
u32 elem_size;
u32 index_mask;
/* 'ownership' of prog_array is claimed by the first program that
* is going to use this map or by the first program which FD is stored
* in the map to make sure that all callers and callees have the same
@@ -419,6 +429,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
attr->numa_node : NUMA_NO_NODE;
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -506,6 +518,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
{
return 0;
}
static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
enum bpf_prog_type type)
{
return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,6 +532,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
return bpf_prog_get_type_dev(ufd, type, false);
}
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);


@@ -15,11 +15,11 @@
* In practice this is far bigger than any realistic pointer offset; this limit
* ensures that umax_value + (int)off + (int)size cannot overflow a u64.
*/
#define BPF_MAX_VAR_OFF (1ULL << 31)
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ INT_MAX
#define BPF_MAX_VAR_SIZ (1 << 29)
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that


@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
.bi_sector = 0, \
.bi_size = UINT_MAX, \
.bi_idx = 0, \
.bi_bvec_done = 0, \
}
#endif /* __LINUX_BVEC_ITER_H */


@@ -219,7 +219,7 @@
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
#ifdef RANDSTRUCT_PLUGIN
#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
#define __randomize_layout __attribute__((randomize_layout))
#define __no_randomize_layout __attribute__((no_randomize_layout))
#endif


@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
* READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
* particular ordering. One way to make the compiler aware of ordering is to
* put the two invocations of READ_ONCE or WRITE_ONCE in different C
* statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
* least two memcpy()s: one for the __builtin_memcpy() and then one for
* the macro doing the copy of variable - '__u' allocated on the stack.
* These two macros will also work on aggregate data types like structs or
* unions. If the size of the accessed data type exceeds the word size of
* the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
* fall back to memcpy(). There's at least two memcpy()s: one for the
* __builtin_memcpy() and then one for the macro doing the copy of variable
* - '__u' allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
* but only when the compiler is aware of some particular ordering. One way
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
* on a union member will work as long as the size of the member matches the
* size of the union and the size is smaller than word size.
*
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
* between process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
* If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
#endif /* __LINUX_COMPILER_H */
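
For illustration, a sketch of the READ_ONCE()/WRITE_ONCE() pattern that replaces removed ACCESS_ONCE() users; this only constrains the compiler, so it suits same-CPU communication (e.g. with an interrupt handler), while SMP ordering still needs barriers or atomics:

static void example_publish(int *data, int *flag, int value)
{
	WRITE_ONCE(*data, value);	/* separate statements keep the  */
	WRITE_ONCE(*flag, 1);		/* compiler from reordering them */
}

static int example_consume(int *data, int *flag)
{
	if (!READ_ONCE(*flag))
		return -EAGAIN;
	return READ_ONCE(*data);
}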


@@ -10,9 +10,6 @@
*/
#include <linux/wait.h>
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#include <linux/lockdep.h>
#endif
/*
* struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,15 @@
struct completion {
unsigned int done;
wait_queue_head_t wait;
#ifdef CONFIG_LOCKDEP_COMPLETIONS
struct lockdep_map_cross map;
#endif
};
#ifdef CONFIG_LOCKDEP_COMPLETIONS
static inline void complete_acquire(struct completion *x)
{
lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
}
static inline void complete_release(struct completion *x)
{
lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
}
static inline void complete_release_commit(struct completion *x)
{
lock_commit_crosslock((struct lockdep_map *)&x->map);
}
#define init_completion_map(x, m) \
do { \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
(m)->name, (m)->key, 0); \
__init_completion(x); \
} while (0)
#define init_completion(x) \
do { \
static struct lock_class_key __key; \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
"(completion)" #x, \
&__key, 0); \
__init_completion(x); \
} while (0)
#else
#define init_completion_map(x, m) __init_completion(x)
#define init_completion(x) __init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
static inline void complete_release_commit(struct completion *x) {}
#endif
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#endif
#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); }))


@@ -275,6 +275,50 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
#define CPER_ARM_CACHE_ERROR 0
#define CPER_ARM_TLB_ERROR 1
#define CPER_ARM_BUS_ERROR 2
#define CPER_ARM_VENDOR_ERROR 3
#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0)
#define CPER_ARM_ERR_OPERATION_SHIFT 18
#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0)
#define CPER_ARM_ERR_LEVEL_SHIFT 22
#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0)
#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0)
#define CPER_ARM_ERR_CORRECTED_SHIFT 26
#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0)
#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0)
#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0)
#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0)
#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0)
#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0)
#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0)
#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0)
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
@@ -494,6 +538,8 @@ struct cper_sec_pcie {
/* Reset to default packing */
#pragma pack()
extern const char * const cper_proc_error_type_strs[4];
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc);
#endif
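
For illustration, a hedged sketch of decoding one field of an ARM error-information word with the new shift/mask pairs (helper name hypothetical):

static u8 example_arm_err_transaction_type(u64 error_info)
{
	return (error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT) &
		CPER_ARM_ERR_TRANSACTION_MASK;
}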


@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
extern ssize_t cpu_show_meltdown(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,


@@ -30,9 +30,6 @@
struct cpufreq_policy;
typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
unsigned long voltage, u32 *power);
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
struct thermal_cooling_device *
cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func);
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
* @policy: cpufreq policy.
*/
#ifdef CONFIG_THERMAL_OF
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
struct cpufreq_policy *policy);
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
return NULL;
}
#endif
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
@@ -90,28 +50,6 @@ cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func)
{
return NULL;
}
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
return NULL;
}
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
@@ -120,4 +58,19 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
}
#endif /* CONFIG_CPU_THERMAL */
#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @policy: cpufreq policy.
*/
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
#endif /* __CPU_COOLING_H__ */

View File

@@ -86,7 +86,7 @@ enum cpuhp_state {
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,


@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
#define VMCOREINFO_SYMBOL_ARRAY(name) \
vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
#define VMCOREINFO_SIZE(name) \
vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
(unsigned long)sizeof(name))


@@ -5,12 +5,19 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
{
return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
}
#endif /* _LINUX_CRC_CCITT_H */
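
For illustration, a minimal sketch of byte-at-a-time use of the new crc_ccitt_false_byte(), equivalent to a single crc_ccitt_false(0xffff, buf, len) call:

static u16 example_crc_ccitt_false(const u8 *buf, size_t len)
{
	u16 crc = 0xffff;	/* customary CCITT-FALSE seed */
	size_t i;

	for (i = 0; i < len; i++)
		crc = crc_ccitt_false_byte(crc, buf[i]);
	return crc;
}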


@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
extern bool may_setgroups(void);
extern void groups_sort(struct group_info *);
/*
* The security context of a task


@@ -71,7 +71,7 @@ extern void delayacct_init(void);
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
extern void __delayacct_blkio_end(void);
extern void __delayacct_blkio_end(struct task_struct *);
extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
__delayacct_blkio_start();
}
static inline void delayacct_blkio_end(void)
static inline void delayacct_blkio_end(struct task_struct *p)
{
if (current->delays)
__delayacct_blkio_end();
__delayacct_blkio_end(p);
delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}
@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
{}
static inline void delayacct_blkio_start(void)
{}
static inline void delayacct_blkio_end(void)
static inline void delayacct_blkio_end(struct task_struct *p)
{}
static inline int delayacct_add_tsk(struct taskstats *d,
struct task_struct *tsk)


@@ -140,11 +140,13 @@ struct efi_boot_memmap {
struct capsule_info {
efi_capsule_header_t header;
efi_capsule_header_t *capsule;
int reset_type;
long index;
size_t count;
size_t total_size;
phys_addr_t *pages;
struct page **pages;
phys_addr_t *phys;
size_t page_bytes_remain;
};


@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,


@@ -639,7 +639,7 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
u64 i_version;
atomic64_t i_version;
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -2036,21 +2036,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
/**
* inode_inc_iversion - increments i_version
* @inode: inode that need to be updated
*
* Every time the inode is modified, the i_version field will be incremented.
* The filesystem has to be mounted with i_version flag
*/
static inline void inode_inc_iversion(struct inode *inode)
{
spin_lock(&inode->i_lock);
inode->i_version++;
spin_unlock(&inode->i_lock);
}
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
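
For illustration, a hedged sketch of the replacement pattern: with i_version now an atomic64_t, filesystems are expected to go through the helpers in linux/iversion.h (introduced in this series) rather than the removed spinlocked increment:

static void example_bump_iversion(struct inode *inode)
{
	inode_inc_iversion(inode);	/* atomic; no i_lock required */
}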


@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
{
if (fscache_cookie_valid(cookie) && PageFsCache(page))
return __fscache_maybe_release_page(cookie, page, gfp);
return false;
return true;
}
/**


@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
/*
@@ -764,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* for init task */
#define INIT_FTRACE_GRAPH .ret_stack = NULL,
/*
* Stack of return addresses for functions
* of a thread.
@@ -844,7 +843,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -923,10 +921,6 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION .trace_recursion = 0,
#endif
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
@@ -935,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);


@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds genl mutex.
* the READ_ONCE(), because caller holds genl mutex.
*/
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())


@@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
{
device_add_disk_no_queue_reg(NULL, disk);
}
extern void del_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);


@@ -66,9 +66,10 @@ struct gpio_irq_chip {
/**
* @lock_key:
*
* Per GPIO IRQ chip lockdep class.
* Per GPIO IRQ chip lockdep classes.
*/
struct lock_class_key *lock_key;
struct lock_class_key *request_key;
/**
* @parent_handler:
@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
/* add/remove chips */
extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct lock_class_key *lock_key);
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
/**
* gpiochip_add_data() - register a gpio_chip
@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
*/
#ifdef CONFIG_LOCKDEP
#define gpiochip_add_data(chip, data) ({ \
static struct lock_class_key key; \
gpiochip_add_data_with_key(chip, data, &key); \
static struct lock_class_key lock_key; \
static struct lock_class_key request_key; \
gpiochip_add_data_with_key(chip, data, &lock_key, \
&request_key); \
})
#else
#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
#endif
static inline int gpiochip_add(struct gpio_chip *chip)
@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type,
bool threaded,
struct lock_class_key *lock_key);
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
#ifdef CONFIG_LOCKDEP
@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type)
{
static struct lock_class_key key;
static struct lock_class_key lock_key;
static struct lock_class_key request_key;
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, false, &key);
handler, type, false,
&lock_key, &request_key);
}
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
unsigned int type)
{
static struct lock_class_key key;
static struct lock_class_key lock_key;
static struct lock_class_key request_key;
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, true, &key);
handler, type, true,
&lock_key, &request_key);
}
#else
static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
unsigned int type)
{
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, false, NULL);
handler, type, false, NULL, NULL);
}
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
unsigned int type)
{
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, true, NULL);
handler, type, true, NULL, NULL);
}
#endif /* CONFIG_LOCKDEP */


@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
*
* HRTIMER_MODE_ABS - Time value is absolute
* HRTIMER_MODE_REL - Time value is relative to now
* HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
* when starting the timer)
* HRTIMER_MODE_SOFT - Timer callback function will be executed in
* soft irq context
*/
enum hrtimer_mode {
HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
HRTIMER_MODE_ABS_PINNED = 0x02,
HRTIMER_MODE_REL_PINNED = 0x03,
HRTIMER_MODE_ABS = 0x00,
HRTIMER_MODE_REL = 0x01,
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_SOFT = 0x04,
HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
};
/*
@@ -87,6 +103,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
* @is_soft: Set if hrtimer will be expired in soft interrupt context.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -97,6 +114,7 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
u8 is_soft;
};
/**
@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
# define HRTIMER_CLOCK_BASE_ALIGN 64
# define __hrtimer_clock_base_align ____cacheline_aligned
#else
# define HRTIMER_CLOCK_BASE_ALIGN 32
# define __hrtimer_clock_base_align
#endif
/**
@@ -123,48 +141,57 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
int index;
unsigned int index;
clockid_t clockid;
seqcount_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
} __hrtimer_clock_base_align;
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
HRTIMER_BASE_MONOTONIC_SOFT,
HRTIMER_BASE_REALTIME_SOFT,
HRTIMER_BASE_BOOTTIME_SOFT,
HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
/*
/**
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
* @migration_enabled: The migration of hrtimers to other cpus is enabled
* @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @next_timer: Pointer to the first expiring timer
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
* @softirq_activated: displays, if the softirq is raised - update of softirq
* related settings is not required then.
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @expires_next: absolute time of the next event, is required for remote
* hrtimer enqueue; it is the total first expiry time (hard
* and soft hrtimer are taken into account)
* @next_timer: Pointer to the first expiring timer
* @softirq_expires_next: Time to check, if soft queues needs also to be expired
* @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +200,28 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
seqcount_t seq;
struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
bool migration_enabled;
bool nohz_active;
unsigned int hres_active : 1,
in_hrtirq : 1,
hang_detected : 1,
softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int in_hrtirq : 1,
hres_active : 1,
hang_detected : 1;
ktime_t expires_next;
struct hrtimer *next_timer;
unsigned int nr_events;
unsigned int nr_retries;
unsigned int nr_hangs;
unsigned short nr_retries;
unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
timer->base->cpu_base->hres_active : 0;
}
#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return timer->base->cpu_base->hres_active;
}
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
}
static inline void clock_was_set_delayed(void) { }
#endif
@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
* hrtimer_start - (re)start an hrtimer on the current CPU
* hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
* @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
* relative (HRTIMER_MODE_REL)
* @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
* relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
* softirq based mode is considered for debug purpose only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
return timer->base->cpu_base->running == timer;
return timer->base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
int clock);
clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
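
For illustration, a hedged sketch of arming a timer in one of the new softirq-expiring modes; everything apart from the hrtimer API itself is hypothetical:

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void example_arm_soft_timer(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}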


@@ -15,6 +15,7 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/bug.h>
struct idr {
struct radix_tree_root idr_rt;


@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file describes the STM32 DFSDM IIO driver API for the audio part
*
* Copyright (C) 2017, STMicroelectronics - All Rights Reserved
* Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
*/
#ifndef STM32_DFSDM_ADC_H
#define STM32_DFSDM_ADC_H
int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
int (*cb)(const void *data, size_t size,
void *private),
void *private);
int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
#endif
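
For illustration, a hedged sketch of an audio consumer registering for DFSDM buffer data through the API above (callback and wrapper names hypothetical):

static int example_dfsdm_cb(const void *data, size_t size, void *private)
{
	/* consume 'size' bytes of filtered samples */
	return 0;
}

static int example_attach_dfsdm(struct iio_dev *iio_dev, void *ctx)
{
	return stm32_dfsdm_get_buff_cb(iio_dev, example_dfsdm_cb, ctx);
}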


@@ -133,6 +133,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
int (*cb)(const void *data,
void *private),
void *private);
/**
* iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
* @cb_buffer: The callback buffer from which we want the channel
* information.
* @watermark: buffer watermark in bytes.
*
* This function allows configuring the buffer watermark.
*/
int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
size_t watermark);
/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
@@ -215,6 +226,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
*/
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
* iio_write_channel_attribute() - Write values to the device attribute.
* @chan: The channel being queried.
* @val: Value being written.
* @val2: Value being written. val2 use depends on the attribute type.
* @attribute: info attribute to be written.
*
* Returns an error code or 0.
*/
int iio_write_channel_attribute(struct iio_channel *chan, int val,
int val2, enum iio_chan_info_enum attribute);
/**
* iio_read_channel_attribute() - Read values from the device attribute.
* @chan: The channel being queried.
* @val: Value being read.
* @val2: Value being read. val2 use depends on the attribute type.
* @attribute: info attribute to be read.
*
* Returns an error code if failed. Else returns a description of what is in val
* and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
* + val2/1e6
*/
int iio_read_channel_attribute(struct iio_channel *chan, int *val,
int *val2, enum iio_chan_info_enum attribute);
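
For illustration, a minimal consumer-side sketch of the new attribute accessor, reading a channel's scale (helper name hypothetical):

static int example_read_scale(struct iio_channel *chan,
			      int *scale_int, int *scale_frac)
{
	return iio_read_channel_attribute(chan, scale_int, scale_frac,
					  IIO_CHAN_INFO_SCALE);
}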
/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.


@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Industrial I/O in kernel hardware consumer interface
*
* Copyright 2017 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#ifndef LINUX_IIO_HW_CONSUMER_H
#define LINUX_IIO_HW_CONSUMER_H
struct iio_hw_consumer;
struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
#endif
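
For illustration, a minimal sketch of the usual enable/disable bracket around a hardware consumer (error handling trimmed, names otherwise hypothetical):

static int example_use_hw_consumer(struct device *dev)
{
	struct iio_hw_consumer *hwc;
	int ret;

	hwc = iio_hw_consumer_alloc(dev);
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	ret = iio_hw_consumer_enable(hwc);
	if (!ret) {
		/* channels consumed on behalf of 'dev' are now claimed */
		iio_hw_consumer_disable(hwc);
	}
	iio_hw_consumer_free(hwc);
	return ret;
}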


@@ -20,34 +20,6 @@
* Currently assumes nano seconds.
*/
enum iio_chan_info_enum {
IIO_CHAN_INFO_RAW = 0,
IIO_CHAN_INFO_PROCESSED,
IIO_CHAN_INFO_SCALE,
IIO_CHAN_INFO_OFFSET,
IIO_CHAN_INFO_CALIBSCALE,
IIO_CHAN_INFO_CALIBBIAS,
IIO_CHAN_INFO_PEAK,
IIO_CHAN_INFO_PEAK_SCALE,
IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
IIO_CHAN_INFO_AVERAGE_RAW,
IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
IIO_CHAN_INFO_SAMP_FREQ,
IIO_CHAN_INFO_FREQUENCY,
IIO_CHAN_INFO_PHASE,
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
IIO_CHAN_INFO_INT_TIME,
IIO_CHAN_INFO_ENABLE,
IIO_CHAN_INFO_CALIBHEIGHT,
IIO_CHAN_INFO_CALIBWEIGHT,
IIO_CHAN_INFO_DEBOUNCE_COUNT,
IIO_CHAN_INFO_DEBOUNCE_TIME,
IIO_CHAN_INFO_CALIBEMISSIVITY,
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
};
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,


@@ -34,4 +34,32 @@ enum iio_available_type {
IIO_AVAIL_RANGE,
};
enum iio_chan_info_enum {
IIO_CHAN_INFO_RAW = 0,
IIO_CHAN_INFO_PROCESSED,
IIO_CHAN_INFO_SCALE,
IIO_CHAN_INFO_OFFSET,
IIO_CHAN_INFO_CALIBSCALE,
IIO_CHAN_INFO_CALIBBIAS,
IIO_CHAN_INFO_PEAK,
IIO_CHAN_INFO_PEAK_SCALE,
IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
IIO_CHAN_INFO_AVERAGE_RAW,
IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
IIO_CHAN_INFO_SAMP_FREQ,
IIO_CHAN_INFO_FREQUENCY,
IIO_CHAN_INFO_PHASE,
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
IIO_CHAN_INFO_INT_TIME,
IIO_CHAN_INFO_ENABLE,
IIO_CHAN_INFO_CALIBHEIGHT,
IIO_CHAN_INFO_CALIBWEIGHT,
IIO_CHAN_INFO_DEBOUNCE_COUNT,
IIO_CHAN_INFO_DEBOUNCE_TIME,
IIO_CHAN_INFO_CALIBEMISSIVITY,
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
};
#endif /* _IIO_TYPES_H_ */


@@ -21,22 +21,11 @@
#include <asm/thread_info.h>
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
#else
# define INIT_PUSHABLE_TASKS(tsk)
#endif
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
#else
#define INIT_CPUSET_SEQ(tsk)
#endif
extern struct nsproxy init_nsproxy;
extern struct group_info init_groups;
extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -47,67 +36,16 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_POSIX_TIMERS
#define INIT_POSIX_TIMERS(s) \
.posix_timers = LIST_HEAD_INIT(s.posix_timers),
#define INIT_CPU_TIMERS(s) \
.cpu_timers = { \
LIST_HEAD_INIT(s.cpu_timers[0]), \
LIST_HEAD_INIT(s.cpu_timers[1]), \
LIST_HEAD_INIT(s.cpu_timers[2]), \
},
#define INIT_CPUTIMER(s) \
.cputimer = { \
.cputime_atomic = INIT_CPUTIME_ATOMIC, \
.running = false, \
.checking_timer = false, \
LIST_HEAD_INIT(s.cpu_timers[2]), \
},
#else
#define INIT_POSIX_TIMERS(s)
#define INIT_CPU_TIMERS(s)
#define INIT_CPUTIMER(s)
#endif
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
.signal = {{0}}}, \
INIT_POSIX_TIMERS(sig) \
INIT_CPU_TIMERS(sig) \
.rlim = INIT_RLIMITS, \
INIT_CPUTIMER(sig) \
INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
}
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = SIG_DFL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}
extern struct group_info init_groups;
#define INIT_STRUCT_PID { \
.count = ATOMIC_INIT(1), \
.tasks = { \
{ .first = NULL }, \
{ .first = NULL }, \
{ .first = NULL }, \
}, \
.level = 0, \
.numbers = { { \
.nr = 0, \
.ns = &init_pid_ns, \
}, } \
}
#define INIT_PID_LINK(type) \
{ \
.node = { \
@@ -117,192 +55,16 @@ extern struct group_info init_groups;
.pid = &init_struct_pid, \
}
#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
.loginuid = INVALID_UID, \
.sessionid = (unsigned int)-1,
#else
#define INIT_IDS
#endif
#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special.s = 0, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
.rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
#ifdef CONFIG_TASKS_RCU
#define INIT_TASK_RCU_TASKS(tsk) \
.rcu_tasks_holdout = false, \
.rcu_tasks_holdout_list = \
LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
.rcu_tasks_idle_cpu = -1,
#else
#define INIT_TASK_RCU_TASKS(tsk)
#endif
extern struct cred init_cred;
#ifdef CONFIG_CGROUP_SCHED
# define INIT_CGROUP_SCHED(tsk) \
.sched_task_group = &root_task_group,
#else
# define INIT_CGROUP_SCHED(tsk)
#endif
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
.perf_event_mutex = \
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
.vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
.vtime.starttime = 0, \
.vtime.state = VTIME_SYS,
#else
# define INIT_VTIME(tsk)
#endif
#define INIT_TASK_COMM "swapper"
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT_CACHED, \
.pi_top_task = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
#endif
#ifdef CONFIG_NUMA_BALANCING
# define INIT_NUMA_BALANCING(tsk) \
.numa_preferred_nid = -1, \
.numa_group = NULL, \
.numa_faults = NULL,
#else
# define INIT_NUMA_BALANCING(tsk)
#endif
#ifdef CONFIG_KASAN
# define INIT_KASAN(tsk) \
.kasan_depth = 1,
#else
# define INIT_KASAN(tsk)
#endif
#ifdef CONFIG_LIVEPATCH
# define INIT_LIVEPATCH(tsk) \
.patch_state = KLP_UNDEFINED,
#else
# define INIT_LIVEPATCH(tsk)
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define INIT_TASK_TI(tsk) \
.thread_info = INIT_THREAD_INFO(tsk), \
.stack_refcount = ATOMIC_INIT(1),
#else
# define INIT_TASK_TI(tsk)
#endif
#ifdef CONFIG_SECURITY
#define INIT_TASK_SECURITY .security = NULL,
#else
#define INIT_TASK_SECURITY
#endif
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk! Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
INIT_TASK_TI(tsk) \
.state = 0, \
.stack = init_stack, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.nr_cpus_allowed= NR_CPUS, \
.mm = NULL, \
.active_mm = &init_mm, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = RR_TIMESLICE, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
INIT_PUSHABLE_TASKS(tsk) \
INIT_CGROUP_SCHED(tsk) \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
RCU_POINTER_INITIALIZER(cred, &init_cred), \
.comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.nsproxy = &init_nsproxy, \
.pending = { \
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
INIT_CPU_TIMERS(tsk) \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
.thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
INIT_PREV_CPUTIME(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
INIT_LIVEPATCH(tsk) \
INIT_TASK_SECURITY \
}
/* Attach to the init_task data structure for proper alignment */
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
#else
#define __init_task_data /**/
#endif
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#endif
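For context, a minimal sketch of how this macro is consumed when the very first task is built; the placement (init/init_task.c) and the lack of alignment annotations are assumptions, only INIT_TASK() and __init_task_data come from this header.

/* Statically construct the boot task; every later task is a copy of it. */
struct task_struct init_task __init_task_data = INIT_TASK(init_task);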

43
include/linux/intel-pti.h Normal file
View File

@@ -0,0 +1,43 @@
/*
* Copyright (C) Intel 2011
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
* (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
* compact JTAG, standard.
*
* This header file will allow other parts of the OS to use the
* interface to write out its contents for debugging a mobile system.
*/
#ifndef LINUX_INTEL_PTI_H_
#define LINUX_INTEL_PTI_H_
/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
#define PTI_LASTDWORD_DTS 0x30
/* basic structure used as a write address to the PTI HW */
struct pti_masterchannel {
u8 master;
u8 channel;
};
/* the following functions are defined in misc/pti.c */
void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
struct pti_masterchannel *pti_request_masterchannel(u8 type,
const char *thread_name);
void pti_release_masterchannel(struct pti_masterchannel *mc);
#endif /* LINUX_INTEL_PTI_H_ */
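A minimal usage sketch, assuming the misc/pti.c driver is present; the thread name, the type value passed to pti_request_masterchannel() and the message contents are purely illustrative.

#include <linux/intel-pti.h>

static void example_pti_trace(void)
{
	static u8 msg[] = "example trace message";
	struct pti_masterchannel *mc;

	/* Ask the driver for a master/channel aperture to write through. */
	mc = pti_request_masterchannel(0, "example_thread");
	if (!mc)
		return;

	/* Route the buffer out through the PTI port. */
	pti_writedata(mc, msg, sizeof(msg) - 1);

	pti_release_masterchannel(mc);
}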

View File

@@ -273,7 +273,8 @@ struct ipv6_pinfo {
* 100: prefer care-of address
*/
dontfrag:1,
autoflowlabel:1;
autoflowlabel:1,
autoflowlabel_set:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;

View File

@@ -212,6 +212,7 @@ struct irq_data {
* mask. Applies only to affinity managed irqs.
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -233,6 +234,7 @@ enum {
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}
static inline void irqd_set_can_reserve(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_CAN_RESERVE;
}
static inline void irqd_clr_can_reserve(struct irq_data *d)
{
__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}
static inline bool irqd_can_reserve(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)

View File

@@ -13,10 +13,13 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
#define IRQ_WORK_PENDING 1UL
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
#define IRQ_WORK_PENDING BIT(0)
#define IRQ_WORK_BUSY BIT(1)
/* Doesn't want IPI, wait for tick: */
#define IRQ_WORK_LAZY BIT(2)
#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
unsigned long flags;

View File

@@ -255,12 +255,15 @@ static inline bool irq_is_percpu_devid(unsigned int irq)
}
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc)
lockdep_set_class(&desc->lock, class);
if (desc) {
lockdep_set_class(&desc->lock, lock_class);
lockdep_set_class(&desc->request_mutex, request_class);
}
}
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI

View File

@@ -113,7 +113,7 @@ struct irq_domain_ops {
unsigned int nr_irqs, void *arg);
void (*free)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs);
int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *out_hwirq, unsigned int *out_type);

View File

@@ -27,24 +27,19 @@
# define trace_hardirq_enter() \
do { \
current->hardirq_context++; \
crossrelease_hist_start(XHLOCK_HARD); \
} while (0)
# define trace_hardirq_exit() \
do { \
current->hardirq_context--; \
crossrelease_hist_end(XHLOCK_HARD); \
} while (0)
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
crossrelease_hist_start(XHLOCK_SOFT); \
} while (0)
# define lockdep_softirq_exit() \
do { \
current->softirq_context--; \
crossrelease_hist_end(XHLOCK_SOFT); \
} while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
@@ -58,7 +53,6 @@ do { \
# define trace_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \

341
include/linux/iversion.h Normal file
View File

@@ -0,0 +1,341 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IVERSION_H
#define _LINUX_IVERSION_H
#include <linux/fs.h>
/*
* The inode->i_version field:
* ---------------------------
* The change attribute (i_version) is mandated by NFSv4 and is mostly for
* knfsd, but is also used for other purposes (e.g. IMA). The i_version must
* appear different to observers if there was a change to the inode's data or
* metadata since it was last queried.
*
* Observers see the i_version as a 64-bit number that never decreases. If it
* remains the same since it was last checked, then nothing has changed in the
* inode. If it's different then something has changed. Observers cannot infer
* anything about the nature or magnitude of the changes from the value, only
* that the inode has changed in some fashion.
*
* Not all filesystems properly implement the i_version counter. Subsystems that
* want to use the i_version field on an inode should first check whether the
* filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
*
* Those that set SB_I_VERSION will automatically have their i_version counter
* incremented on writes to normal files. If the SB_I_VERSION is not set, then
* the VFS will not touch it on writes, and the filesystem can use it how it
* wishes. Note that the filesystem is always responsible for updating the
* i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
* We consider these sorts of filesystems to have a kernel-managed i_version.
*
* It may be impractical for filesystems to keep i_version updates atomic with
* respect to the changes that cause them. They should, however, guarantee
* that i_version updates are never visible before the changes that caused
* them. Also, i_version updates should never be delayed longer than it takes
* the original change to reach disk.
*
* This implementation uses the low bit in the i_version field as a flag to
* track when the value has been queried. If it has not been queried since it
* was last incremented, we can skip the increment in most cases.
*
* In the event that we're updating the ctime, we will usually go ahead and
* bump the i_version anyway. Since that has to go to stable storage in some
* fashion, we might as well increment it as well.
*
* With this implementation, the value should always appear to observers to
* increase over time if the file has changed. It's recommended to use the
* inode_cmp_iversion() helper to compare values.
*
* Note that some filesystems (e.g. NFS and AFS) just use the field to store
* a server-provided value (for the most part). For that reason, those
* filesystems do not set SB_I_VERSION. These filesystems are considered to
* have a self-managed i_version.
*
* Persistently storing the i_version
* ----------------------------------
* Queries of the i_version field are not gated on them hitting the backing
* store. It's always possible that the host could crash after allowing
* a query of the value but before it has made it to disk.
*
* To mitigate this problem, filesystems should always use
* inode_set_iversion_queried when loading an existing inode from disk. This
* ensures that the next attempted inode increment will result in the value
* changing.
*
* Storing the value to disk therefore does not count as a query, so those
* filesystems should use inode_peek_iversion to grab the value to be stored.
* There is no need to flag the value as having been queried in that case.
*/
/*
* We borrow the lowest bit in the i_version to use as a flag to tell whether
* it has been queried since we last incremented it. If it has, then we must
* increment it on the next change. After that, we can clear the flag and
* avoid incrementing it again until it has again been queried.
*/
#define I_VERSION_QUERIED_SHIFT (1)
#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
/**
* inode_set_iversion_raw - set i_version to the specified raw value
* @inode: inode to set
* @val: new i_version value to set
*
* Set @inode's i_version field to @val. This function is for use by
* filesystems that self-manage the i_version.
*
* For example, the NFS client stores its NFSv4 change attribute in this way,
* and the AFS client stores the data_version from the server here.
*/
static inline void
inode_set_iversion_raw(struct inode *inode, u64 val)
{
atomic64_set(&inode->i_version, val);
}
/**
* inode_peek_iversion_raw - grab a "raw" iversion value
* @inode: inode from which i_version should be read
*
* Grab a "raw" inode->i_version value and return it. The i_version is not
* flagged or converted in any way. This is mostly used to access a self-managed
* i_version.
*
* With those filesystems, we want to treat the i_version as an entirely
* opaque value.
*/
static inline u64
inode_peek_iversion_raw(const struct inode *inode)
{
return atomic64_read(&inode->i_version);
}
/**
* inode_set_iversion - set i_version to a particular value
* @inode: inode to set
* @val: new i_version value to set
*
* Set @inode's i_version field to @val. This function is for filesystems with
* a kernel-managed i_version, for initializing a newly-created inode from
* scratch.
*
* In this case, we do not set the QUERIED flag since we know that this value
* has never been queried.
*/
static inline void
inode_set_iversion(struct inode *inode, u64 val)
{
inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
}
/**
* inode_set_iversion_queried - set i_version to a particular value as queried
* @inode: inode to set
* @val: new i_version value to set
*
* Set @inode's i_version field to @val, and flag it for increment on the next
* change.
*
* Filesystems that persistently store the i_version on disk should use this
* when loading an existing inode from disk.
*
* When loading in an i_version value from a backing store, we can't be certain
* that it wasn't previously viewed before being stored. Thus, we must assume
* that it was, to ensure that we don't end up handing out the same value for
* different versions of the same inode.
*/
static inline void
inode_set_iversion_queried(struct inode *inode, u64 val)
{
inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
I_VERSION_QUERIED);
}
/**
* inode_maybe_inc_iversion - increments i_version
* @inode: inode with the i_version that should be updated
* @force: increment the counter even if it's not necessary?
*
* Every time the inode is modified, the i_version field must be seen to have
* changed by any observer.
*
* If "force" is set or the QUERIED flag is set, then ensure that we increment
* the value, and clear the queried flag.
*
* In the common case where neither is set, then we can return "false" without
* updating i_version.
*
* If this function returns false, and no other metadata has changed, then we
* can avoid logging the metadata.
*/
static inline bool
inode_maybe_inc_iversion(struct inode *inode, bool force)
{
u64 cur, old, new;
/*
* The i_version field is not strictly ordered with any other inode
* information, but the legacy inode_inc_iversion code used a spinlock
* to serialize increments.
*
* Here, we add full memory barriers to ensure that any de-facto
* ordering with other info is preserved.
*
* This barrier pairs with the barrier in inode_query_iversion()
*/
smp_mb();
cur = inode_peek_iversion_raw(inode);
for (;;) {
/* If flag is clear then we needn't do anything */
if (!force && !(cur & I_VERSION_QUERIED))
return false;
/* Since lowest bit is flag, add 2 to avoid it */
new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
old = atomic64_cmpxchg(&inode->i_version, cur, new);
if (likely(old == cur))
break;
cur = old;
}
return true;
}
/**
* inode_inc_iversion - forcibly increment i_version
* @inode: inode that needs to be updated
*
* Forcibly increment the i_version field. This always results in a change to
* the observable value.
*/
static inline void
inode_inc_iversion(struct inode *inode)
{
inode_maybe_inc_iversion(inode, true);
}
/**
* inode_iversion_need_inc - is the i_version in need of being incremented?
* @inode: inode to check
*
* Returns whether the inode->i_version counter needs incrementing on the next
* change. Just fetch the value and check the QUERIED flag.
*/
static inline bool
inode_iversion_need_inc(struct inode *inode)
{
return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
}
/**
* inode_inc_iversion_raw - forcibly increment raw i_version
* @inode: inode that needs to be updated
*
* Forcibly increment the raw i_version field. This always results in a change
* to the raw value.
*
* NFS will use the i_version field to store the value from the server. It
* mostly treats it as opaque, but in the case where it holds a write
* delegation, it must increment the value itself. This function does that.
*/
static inline void
inode_inc_iversion_raw(struct inode *inode)
{
atomic64_inc(&inode->i_version);
}
/**
* inode_peek_iversion - read i_version without flagging it to be incremented
* @inode: inode from which i_version should be read
*
* Read the inode i_version counter for an inode without registering it as a
* query.
*
* This is typically used by local filesystems that need to store an i_version
* on disk. In that situation, it's not necessary to flag it as having been
* viewed, as the result won't be used to gauge changes from that point.
*/
static inline u64
inode_peek_iversion(const struct inode *inode)
{
return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
}
/**
* inode_query_iversion - read i_version for later use
* @inode: inode from which i_version should be read
*
* Read the inode i_version counter. This should be used by callers that wish
* to store the returned i_version for later comparison. This will guarantee
* that a later query of the i_version will result in a different value if
* anything has changed.
*
* In this implementation, we fetch the current value, set the QUERIED flag and
* then try to swap it into place with a cmpxchg, if it wasn't already set. If
* that fails, we try again with the newly fetched value from the cmpxchg.
*/
static inline u64
inode_query_iversion(struct inode *inode)
{
u64 cur, old, new;
cur = inode_peek_iversion_raw(inode);
for (;;) {
/* If flag is already set, then no need to swap */
if (cur & I_VERSION_QUERIED) {
/*
* This barrier (and the implicit barrier in the
* cmpxchg below) pairs with the barrier in
* inode_maybe_inc_iversion().
*/
smp_mb();
break;
}
new = cur | I_VERSION_QUERIED;
old = atomic64_cmpxchg(&inode->i_version, cur, new);
if (likely(old == cur))
break;
cur = old;
}
return cur >> I_VERSION_QUERIED_SHIFT;
}
/**
* inode_cmp_iversion_raw - check whether the raw i_version counter has changed
* @inode: inode to check
* @old: old value to check against its i_version
*
* Compare the current raw i_version counter with a previous one. Returns 0 if
* they are the same or non-zero if they are different.
*/
static inline s64
inode_cmp_iversion_raw(const struct inode *inode, u64 old)
{
return (s64)inode_peek_iversion_raw(inode) - (s64)old;
}
/**
* inode_cmp_iversion - check whether the i_version counter has changed
* @inode: inode to check
* @old: old value to check against its i_version
*
* Compare an i_version counter with a previous one. Returns 0 if they are
* the same, a positive value if the one in the inode appears newer than @old,
* and a negative value if @old appears to be newer than the one in the
* inode.
*
* Note that we don't need to set the QUERIED flag in this case, as the value
* in the inode is not being recorded for later use.
*/
static inline s64
inode_cmp_iversion(const struct inode *inode, u64 old)
{
return (s64)(inode_peek_iversion_raw(inode) & ~I_VERSION_QUERIED) -
(s64)(old << I_VERSION_QUERIED_SHIFT);
}
#endif
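A minimal sketch of the intended usage, one side as a consumer and one side as a filesystem with a kernel-managed i_version; the example_* names are hypothetical, only the inode_*_iversion() helpers and mark_inode_dirty() come from the kernel headers.

#include <linux/fs.h>
#include <linux/iversion.h>

struct example_cache {
	u64 change_attr;	/* value recorded at the last validation */
};

/* Consumer side: record the counter now, compare against it later. */
static void example_cache_update(struct example_cache *c, struct inode *inode)
{
	c->change_attr = inode_query_iversion(inode);
}

static bool example_cache_is_stale(const struct example_cache *c,
				   struct inode *inode)
{
	return inode_cmp_iversion(inode, c->change_attr) != 0;
}

/* Filesystem side: on a change, only log the inode if the counter moved. */
static void example_on_change(struct inode *inode)
{
	if (inode_maybe_inc_iversion(inode, false))
		mark_inode_dirty(inode);
}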

View File

@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
atomic_dec(&key->enabled);
}
#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.

View File

@@ -50,10 +50,7 @@ struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
struct nvm_dev_ops {
nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;
@@ -112,8 +108,6 @@ enum {
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
NVM_OP_HBREAD = 0x02,
NVM_OP_HBWRITE = 0x81,
NVM_OP_PWRITE = 0x91,
NVM_OP_PREAD = 0x92,
NVM_OP_ERASE = 0x90,
@@ -165,12 +159,16 @@ struct nvm_id_group {
u8 fmtype;
u8 num_ch;
u8 num_lun;
u8 num_pln;
u16 num_blk;
u16 num_pg;
u16 fpg_sz;
u16 num_chk;
u16 clba;
u16 csecs;
u16 sos;
u16 ws_min;
u16 ws_opt;
u16 ws_seq;
u16 ws_per_chk;
u32 trdt;
u32 trdm;
u32 tprt;
@@ -181,7 +179,10 @@ struct nvm_id_group {
u32 mccap;
u16 cpar;
struct nvm_id_lp_tbl lptbl;
/* 1.2 compatibility */
u8 num_pln;
u16 num_pg;
u16 fpg_sz;
};
struct nvm_addr_format {
@@ -217,6 +218,10 @@ struct nvm_target {
#define ADDR_EMPTY (~0ULL)
#define NVM_TARGET_DEFAULT_OP (101)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
@@ -239,7 +244,6 @@ struct nvm_rq {
void *meta_list;
dma_addr_t dma_meta_list;
struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
@@ -268,31 +272,38 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
/* Device generic information */
struct nvm_geo {
/* generic geometry */
int nr_chnls;
int nr_luns;
int luns_per_chnl; /* -1 if channels are not symmetric */
int nr_planes;
int sec_per_pg; /* only sectors for a single page */
int pgs_per_blk;
int blks_per_lun;
int fpg_size;
int pfpg_size; /* size of buffer if all pages are to be read */
int all_luns; /* across channels */
int nr_luns; /* per channel */
int nr_chks; /* per lun */
int sec_size;
int oob_size;
int mccap;
int sec_per_chk;
int sec_per_lun;
int ws_min;
int ws_opt;
int ws_seq;
int ws_per_chk;
int max_rq_size;
int op;
struct nvm_addr_format ppaf;
/* Calculated/Cached values. These do not reflect the actual usable
* blocks at run-time.
*/
int max_rq_size;
/* Legacy 1.2 specific geometry */
int plane_mode; /* drive device in single, double or quad mode */
int nr_planes;
int sec_per_pg; /* only sectors for a single page */
int sec_per_pl; /* all sectors across planes */
int sec_per_blk;
int sec_per_lun;
};
/* sub-device structure */
@@ -320,10 +331,6 @@ struct nvm_dev {
/* Device information */
struct nvm_geo geo;
/* lower page table */
int lps_per_blk;
int *lptbl;
unsigned long total_secs;
unsigned long *lun_map;
@@ -346,36 +353,6 @@ struct nvm_dev {
struct list_head targets;
};
static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
u64 pba)
{
struct ppa_addr l;
int secs, pgs, blks, luns;
sector_t ppa = pba;
l.ppa = 0;
div_u64_rem(ppa, geo->sec_per_pg, &secs);
l.g.sec = secs;
sector_div(ppa, geo->sec_per_pg);
div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, geo->pgs_per_blk);
div_u64_rem(ppa, geo->blks_per_lun, &blks);
l.g.blk = blks;
sector_div(ppa, geo->blks_per_lun);
div_u64_rem(ppa, geo->luns_per_chnl, &luns);
l.g.lun = luns;
sector_div(ppa, geo->luns_per_chnl);
l.g.ch = ppa;
return l;
}
static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
struct ppa_addr r)
{
@@ -418,25 +395,6 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
return l;
}
static inline int ppa_empty(struct ppa_addr ppa_addr)
{
return (ppa_addr.ppa == ADDR_EMPTY);
}
static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
ppa_addr->ppa = ADDR_EMPTY;
}
static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
{
if (ppa_empty(ppa1) || ppa_empty(ppa2))
return 0;
return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
(ppa1.g.blk == ppa2.g.blk));
}
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -481,17 +439,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

View File

@@ -158,12 +158,6 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
/*
* Whether it's a crosslock.
*/
int cross;
#endif
};
static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,95 +261,8 @@ struct held_lock {
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
/*
* Generation id.
*
* A value of cross_gen_id will be stored when holding this,
* which is globally increased whenever each crosslock is held.
*/
unsigned int gen_id;
#endif
};
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5
/*
* This is for keeping locks waiting for commit so that true dependencies
* can be added at commit step.
*/
struct hist_lock {
/*
* Id for each entry in the ring buffer. This is used to
* decide whether the ring buffer was overwritten or not.
*
* For example,
*
* |<----------- hist_lock ring buffer size ------->|
* pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
* wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
*
* where 'p' represents an acquisition in process
* context, 'i' represents an acquisition in irq
* context.
*
* In this example, the ring buffer was overwritten by
* acquisitions in irq context, that should be detected on
* rollback or commit.
*/
unsigned int hist_id;
/*
* Separate stack_trace data. This will be used at the commit step.
*/
struct stack_trace trace;
unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
/*
* Separate hlock instance. This will be used at the commit step.
*
* TODO: Use a smaller data structure containing only necessary
* data. However, we should make lockdep code able to handle the
* smaller one first.
*/
struct held_lock hlock;
};
/*
* To initialize a lock as crosslock, lockdep_init_map_crosslock() should
* be called instead of lockdep_init_map().
*/
struct cross_lock {
/*
* When more than one acquisition of crosslocks are overlapped,
* we have to perform commit for them based on cross_gen_id of
* the first acquisition, which allows us to add more true
* dependencies.
*
* Moreover, when no acquisition of a crosslock is in progress,
* we should not perform commit because the lock might not exist
* any more, which might cause incorrect memory access. So we
* have to track the number of acquisitions of a crosslock.
*/
int nr_acquire;
/*
* Separate hlock instance. This will be used at the commit step.
*
* TODO: Use a smaller data structure containing only necessary
* data. However, we should make lockdep code able to handle the
* smaller one first.
*/
struct held_lock hlock;
};
struct lockdep_map_cross {
struct lockdep_map map;
struct cross_lock xlock;
};
#endif
/*
* Initialization, self-test and debugging-output methods:
*/
@@ -430,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
extern int lock_is_held_type(struct lockdep_map *lock, int read);
extern int lock_is_held_type(const struct lockdep_map *lock, int read);
static inline int lock_is_held(struct lockdep_map *lock)
static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
@@ -460,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
# define INIT_LOCKDEP .lockdep_recursion = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) do { \
@@ -519,7 +424,6 @@ static inline void lockdep_on(void)
* #ifdef the call himself.
*/
# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
@@ -560,37 +464,6 @@ enum xhlock_context_t {
XHLOCK_CTX_NR,
};
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
const char *name,
struct lock_class_key *key,
int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);
/*
* What we essentially have to initialize is 'nr_acquire'. Other members
* will be initialized in add_xlock().
*/
#define STATIC_CROSS_LOCK_INIT() \
{ .nr_acquire = 0,}
#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
{ .map.name = (_name), .map.key = (void *)(_key), \
.map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), .cross = 0, }
extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.
@@ -599,12 +472,9 @@ extern void lockdep_free_task(struct task_struct *task);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */
#ifdef CONFIG_LOCK_STAT

View File

@@ -645,11 +645,6 @@ struct axp20x_dev {
const struct regmap_irq_chip *regmap_irq_chip;
};
struct axp288_extcon_pdata {
/* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
struct gpio_desc *gpio_mux_cntl;
};
/* generic helper function for reading 9-16 bit wide regs */
static inline int axp20x_read_variable_width(struct regmap *regmap,
unsigned int reg, unsigned int width)

View File

@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
/* debugfs stuff */
int cros_ec_debugfs_init(struct cros_ec_dev *ec);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
/* ACPI GPE handler */
#ifdef CONFIG_ACPI

View File

@@ -2904,16 +2904,33 @@ enum usb_pd_control_mux {
USB_PD_CTRL_MUX_AUTO = 5,
};
enum usb_pd_control_swap {
USB_PD_CTRL_SWAP_NONE = 0,
USB_PD_CTRL_SWAP_DATA = 1,
USB_PD_CTRL_SWAP_POWER = 2,
USB_PD_CTRL_SWAP_VCONN = 3,
USB_PD_CTRL_SWAP_COUNT
};
struct ec_params_usb_pd_control {
uint8_t port;
uint8_t role;
uint8_t mux;
uint8_t swap;
} __packed;
#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */
#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */
#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */
#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */
#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */
#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */
#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powered */
struct ec_response_usb_pd_control_v1 {
uint8_t enabled;
uint8_t role;

View File

@@ -3733,6 +3733,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3

View File

@@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Core definitions for RAVE SP MFD driver.
*
* Copyright (C) 2017 Zodiac Inflight Innovations
*/
#ifndef _LINUX_RAVE_SP_H_
#define _LINUX_RAVE_SP_H_
#include <linux/notifier.h>
enum rave_sp_command {
RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
RAVE_SP_CMD_BOOT_SOURCE = 0x26,
RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
RAVE_SP_CMD_STATUS = 0xA0,
RAVE_SP_CMD_SW_WDT = 0xA1,
RAVE_SP_CMD_PET_WDT = 0xA2,
RAVE_SP_CMD_RESET = 0xA7,
RAVE_SP_CMD_RESET_REASON = 0xA8,
RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
RAVE_SP_EVNT_BASE = 0xE0,
};
struct rave_sp;
static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
{
return ((unsigned long)value << 8) | event;
}
static inline u8 rave_sp_action_unpack_event(unsigned long action)
{
return action;
}
static inline u8 rave_sp_action_unpack_value(unsigned long action)
{
return action >> 8;
}
int rave_sp_exec(struct rave_sp *sp,
void *__data, size_t data_size,
void *reply_data, size_t reply_data_size);
struct device;
int devm_rave_sp_register_event_notifier(struct device *dev,
struct notifier_block *nb);
#endif /* _LINUX_RAVE_SP_H_ */
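A minimal sketch of an event consumer, assuming this header is included as <linux/mfd/rave-sp.h> and that the caller's probe() has a struct device associated with the RAVE SP cell; the event code 0xE1 and the example_* names are hypothetical.

#include <linux/device.h>
#include <linux/mfd/rave-sp.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_sp_event(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	u8 event = rave_sp_action_unpack_event(action);
	u8 value = rave_sp_action_unpack_value(action);

	if (event != 0xE1)		/* hypothetical event code */
		return NOTIFY_DONE;

	pr_info("rave-sp: event %#x, value %#x\n", event, value);
	return NOTIFY_OK;
}

static struct notifier_block example_sp_nb = {
	.notifier_call = example_sp_event,
};

/* Called from the consumer's probe(). */
static int example_register_events(struct device *dev)
{
	return devm_rave_sp_register_event_notifier(dev, &example_sp_nb);
}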

View File

@@ -1,13 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* STM32 Low-Power Timer parent driver.
*
* Copyright (C) STMicroelectronics 2017
*
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
*
* Inspired by Benjamin Gaignard's stm32-timers driver
*
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_LPTIMER_H_

View File

@@ -1,9 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
*
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_

View File

@@ -25,26 +25,6 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
#define CNF_CMD 0x04
#define CNF_CTL_BASE 0x10
#define CNF_INT_PIN 0x3d
#define CNF_STOP_CLK_CTL 0x40
#define CNF_GCLK_CTL 0x41
#define CNF_SD_CLK_MODE 0x42
#define CNF_PIN_STATUS 0x44
#define CNF_PWR_CTL_1 0x48
#define CNF_PWR_CTL_2 0x49
#define CNF_PWR_CTL_3 0x4a
#define CNF_CARD_DETECT_MODE 0x4c
#define CNF_SD_SLOT 0x50
#define CNF_EXT_GCLK_CTL_1 0xf0
#define CNF_EXT_GCLK_CTL_2 0xf1
#define CNF_EXT_GCLK_CTL_3 0xf9
#define CNF_SD_LED_EN_1 0xfa
#define CNF_SD_LED_EN_2 0xfe
#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \

View File

@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
@@ -556,6 +557,7 @@ struct mlx5_core_sriov {
};
struct mlx5_irq_info {
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
@@ -1048,7 +1050,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1164,6 +1166,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
@@ -1226,7 +1232,23 @@ enum {
static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
{
return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
const struct cpumask *mask;
struct irq_desc *desc;
unsigned int irq;
int eqn;
int err;
err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
if (err)
return NULL;
desc = irq_to_desc(irq);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
#else
mask = desc->irq_common_data.affinity;
#endif
return mask;
}
#endif /* MLX5_DRIVER_H */

View File

@@ -147,7 +147,7 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
@@ -1027,8 +1027,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
u8 disable_local_lb[0x1];
u8 reserved_at_3e2[0x9];
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 reserved_at_3e3[0x8];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -7239,7 +7240,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 vxlan_udp_port[0x10];
};
struct mlx5_ifc_set_rate_limit_out_bits {
struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7248,7 +7249,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_rate_limit_in_bits {
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -7261,6 +7262,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
u8 reserved_at_60[0x20];
u8 rate_limit[0x20];
u8 reserved_at_a0[0x160];
};
struct mlx5_ifc_access_register_out_bits {

View File

@@ -324,6 +324,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
@@ -380,6 +381,7 @@ struct mmc_host {
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
unsigned int use_blk_mq:1; /* use blk-mq */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -422,9 +424,6 @@ struct mmc_host {
struct dentry *debugfs_root;
struct mmc_async_req *areq; /* active async req */
struct mmc_context_info context_info; /* async synchronization info */
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;

View File

@@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
bool mmc_can_gpio_ro(struct mmc_host *host);
#endif

View File

@@ -801,6 +801,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
#ifdef RETPOLINE
extern bool retpoline_module_ok(bool has_retpoline);
#else
static inline bool retpoline_module_ok(bool has_retpoline)
{
return true;
}
#endif
#ifdef CONFIG_MODULE_SIG
static inline bool module_sig_ok(struct module *module)
{

View File

@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
#define INVALIDATE_CACHED_RANGE(map, from, size) \
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
#define map_word_equal(map, val1, val2) \
({ \
int i, ret = 1; \
for (i = 0; i < map_words(map); i++) \
if ((val1).x[i] != (val2).x[i]) { \
ret = 0; \
break; \
} \
ret; \
})
static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
{
int i;
#define map_word_and(map, val1, val2) \
({ \
map_word r; \
int i; \
for (i = 0; i < map_words(map); i++) \
r.x[i] = (val1).x[i] & (val2).x[i]; \
r; \
})
for (i = 0; i < map_words(map); i++) {
if (val1.x[i] != val2.x[i])
return 0;
}
#define map_word_clr(map, val1, val2) \
({ \
map_word r; \
int i; \
for (i = 0; i < map_words(map); i++) \
r.x[i] = (val1).x[i] & ~(val2).x[i]; \
r; \
})
return 1;
}
#define map_word_or(map, val1, val2) \
({ \
map_word r; \
int i; \
for (i = 0; i < map_words(map); i++) \
r.x[i] = (val1).x[i] | (val2).x[i]; \
r; \
})
static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
{
map_word r;
int i;
#define map_word_andequal(map, val1, val2, val3) \
({ \
int i, ret = 1; \
for (i = 0; i < map_words(map); i++) { \
if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
ret = 0; \
break; \
} \
} \
ret; \
})
for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & val2.x[i];
return r;
}
static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
{
map_word r;
int i;
for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & ~val2.x[i];
return r;
}
static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
{
map_word r;
int i;
for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] | val2.x[i];
return r;
}
static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
{
int i;
for (i = 0; i < map_words(map); i++) {
if ((val1.x[i] & val2.x[i]) != val3.x[i])
return 0;
}
return 1;
}
static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
{
int i;
for (i = 0; i < map_words(map); i++) {
if (val1.x[i] & val2.x[i])
return 1;
}
return 0;
}
#define map_word_bitsset(map, val1, val2) \
({ \
int i, ret = 0; \
for (i = 0; i < map_words(map); i++) { \
if ((val1).x[i] & (val2).x[i]) { \
ret = 1; \
break; \
} \
} \
ret; \
})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{

View File

@@ -489,6 +489,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
/**
* mtd_align_erase_req - Adjust an erase request to align things on eraseblock
* boundaries.
* @mtd: the MTD device this erase request applies on
* @req: the erase request to adjust
*
* This function will adjust @req->addr and @req->len to align them on
* @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
*/
static inline void mtd_align_erase_req(struct mtd_info *mtd,
struct erase_info *req)
{
u32 mod;
if (WARN_ON(!mtd->erasesize))
return;
mod = mtd_mod_by_eb(req->addr, mtd);
if (mod) {
req->addr -= mod;
req->len += mod;
}
mod = mtd_mod_by_eb(req->addr + req->len, mtd);
if (mod)
req->len += mtd->erasesize - mod;
}
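A worked example of the adjustment above, using illustrative values (a 128 KiB eraseblock); the mtd pointer is assumed to come from the caller.

static void example_align(struct mtd_info *mtd)
{
	struct erase_info req = {
		.addr = 0x21000,	/* 4 KiB past the start of eraseblock 1 */
		.len  = 0x1000,
	};

	/* Assuming mtd->erasesize == 0x20000 (128 KiB). */
	mtd_align_erase_req(mtd, &req);

	/*
	 * req.addr is now 0x20000 and req.len is 0x20000: the request was
	 * widened to cover the whole eraseblock containing the original range.
	 */
}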
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)

View File

@@ -133,12 +133,6 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
/*
* If your controller already sends the required NAND commands when
* reading or writing a page, then the framework is not supposed to
* send READ0 and SEQIN/PAGEPROG respectively.
*/
#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -191,11 +185,6 @@ enum nand_ecc_algo {
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
/*
* This option is defined if the board driver allocates its own buffers
* (e.g. because it needs them DMA-coherent).
*/
#define NAND_OWN_BUFFERS 0x00020000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
* @priv: pointer to private ECC control data
* @calc_buf: buffer for calculated ECC, size is oobsize.
* @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
* be provided if a hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl {
int postpad;
unsigned int options;
void *priv;
u8 *calc_buf;
u8 *code_buf;
void (*hwctl)(struct mtd_info *mtd, int mode);
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code);
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl {
int page);
};
static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
{
return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
}
/**
* struct nand_buffers - buffer structure for read/write
* @ecccalc: buffer pointer for calculated ECC, size is oobsize.
* @ecccode: buffer pointer for ECC read from flash, size is oobsize.
* @databuf: buffer pointer for data, size is (page size + oobsize).
*
* Do not change the order of buffers. databuf and oobrbuf must be in
* consecutive order.
*/
struct nand_buffers {
uint8_t *ecccalc;
uint8_t *ecccode;
uint8_t *databuf;
};
/**
* struct nand_sdr_timings - SDR NAND chip timings
*
@@ -761,6 +734,350 @@ struct nand_manufacturer_ops {
void (*cleanup)(struct nand_chip *chip);
};
/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
struct nand_op_cmd_instr {
u8 opcode;
};
/**
* struct nand_op_addr_instr - Definition of an address instruction
* @naddrs: length of the @addrs array
* @addrs: array containing the address cycles to issue
*/
struct nand_op_addr_instr {
unsigned int naddrs;
const u8 *addrs;
};
/**
* struct nand_op_data_instr - Definition of a data instruction
* @len: number of data bytes to move
* @in: buffer to fill when reading from the NAND chip
* @out: buffer to read from when writing to the NAND chip
* @force_8bit: force 8-bit access
*
* Please note that "in" and "out" are inverted from the ONFI specification
* and are from the controller perspective, so an "in" is a read from the NAND
* chip while an "out" is a write to the NAND chip.
*/
struct nand_op_data_instr {
unsigned int len;
union {
void *in;
const void *out;
} buf;
bool force_8bit;
};
/**
* struct nand_op_waitrdy_instr - Definition of a wait ready instruction
* @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
*/
struct nand_op_waitrdy_instr {
unsigned int timeout_ms;
};
/**
* enum nand_op_instr_type - Definition of all instruction types
* @NAND_OP_CMD_INSTR: command instruction
* @NAND_OP_ADDR_INSTR: address instruction
* @NAND_OP_DATA_IN_INSTR: data in instruction
* @NAND_OP_DATA_OUT_INSTR: data out instruction
* @NAND_OP_WAITRDY_INSTR: wait ready instruction
*/
enum nand_op_instr_type {
NAND_OP_CMD_INSTR,
NAND_OP_ADDR_INSTR,
NAND_OP_DATA_IN_INSTR,
NAND_OP_DATA_OUT_INSTR,
NAND_OP_WAITRDY_INSTR,
};
/**
* struct nand_op_instr - Instruction object
* @type: the instruction type
* @cmd/@addr/@data/@waitrdy: extra data associated to the instruction.
* You'll have to use the appropriate element
* depending on @type
* @delay_ns: delay the controller should apply after the instruction has been
* issued on the bus. Most modern controllers have internal timings
* control logic, and in this case, the controller driver can ignore
* this field.
*/
struct nand_op_instr {
enum nand_op_instr_type type;
union {
struct nand_op_cmd_instr cmd;
struct nand_op_addr_instr addr;
struct nand_op_data_instr data;
struct nand_op_waitrdy_instr waitrdy;
} ctx;
unsigned int delay_ns;
};
/*
* Special handling must be done for the WAITRDY timeout parameter as it usually
* is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
* tBERS (during an erase), all of which are u64 values that cannot be
* divided by the usual kernel macros and must be handled with the special
* DIV_ROUND_UP_ULL() macro.
*/
#define __DIVIDE(dividend, divisor) ({ \
sizeof(dividend) == sizeof(u32) ? \
DIV_ROUND_UP(dividend, divisor) : \
DIV_ROUND_UP_ULL(dividend, divisor); \
})
#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
#define NAND_OP_CMD(id, ns) \
{ \
.type = NAND_OP_CMD_INSTR, \
.ctx.cmd.opcode = id, \
.delay_ns = ns, \
}
#define NAND_OP_ADDR(ncycles, cycles, ns) \
{ \
.type = NAND_OP_ADDR_INSTR, \
.ctx.addr = { \
.naddrs = ncycles, \
.addrs = cycles, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_DATA_IN(l, b, ns) \
{ \
.type = NAND_OP_DATA_IN_INSTR, \
.ctx.data = { \
.len = l, \
.buf.in = b, \
.force_8bit = false, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_DATA_OUT(l, b, ns) \
{ \
.type = NAND_OP_DATA_OUT_INSTR, \
.ctx.data = { \
.len = l, \
.buf.out = b, \
.force_8bit = false, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
{ \
.type = NAND_OP_DATA_IN_INSTR, \
.ctx.data = { \
.len = l, \
.buf.in = b, \
.force_8bit = true, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
{ \
.type = NAND_OP_DATA_OUT_INSTR, \
.ctx.data = { \
.len = l, \
.buf.out = b, \
.force_8bit = true, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_WAIT_RDY(tout_ms, ns) \
{ \
.type = NAND_OP_WAITRDY_INSTR, \
.ctx.waitrdy.timeout_ms = tout_ms, \
.delay_ns = ns, \
}
/**
* struct nand_subop - a sub operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
* of the sub-operation
* @last_instr_end_off: offset to end at (excluded) for the last instruction
* of the sub-operation
*
* Both @first_instr_start_off and @last_instr_end_off only apply to data or
* address instructions.
*
* When an operation cannot be handled as is by the NAND controller, it will
* be split by the parser into sub-operations which will be passed to the
* controller driver.
*/
struct nand_subop {
const struct nand_op_instr *instrs;
unsigned int ninstrs;
unsigned int first_instr_start_off;
unsigned int last_instr_end_off;
};
int nand_subop_get_addr_start_off(const struct nand_subop *subop,
unsigned int op_id);
int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
unsigned int op_id);
int nand_subop_get_data_start_off(const struct nand_subop *subop,
unsigned int op_id);
int nand_subop_get_data_len(const struct nand_subop *subop,
unsigned int op_id);
/**
* struct nand_op_parser_addr_constraints - Constraints for address instructions
* @maxcycles: maximum number of address cycles the controller can issue in a
* single step
*/
struct nand_op_parser_addr_constraints {
unsigned int maxcycles;
};
/**
* struct nand_op_parser_data_constraints - Constraints for data instructions
* @maxlen: maximum data length that the controller can handle in a single step
*/
struct nand_op_parser_data_constraints {
unsigned int maxlen;
};
/**
* struct nand_op_parser_pattern_elem - One element of a pattern
* @type: the instruction type
* @optional: whether this element of the pattern is optional or mandatory
* @addr/@data: address or data constraint (number of cycles or data length)
*/
struct nand_op_parser_pattern_elem {
enum nand_op_instr_type type;
bool optional;
union {
struct nand_op_parser_addr_constraints addr;
struct nand_op_parser_data_constraints data;
} ctx;
};
#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
{ \
.type = NAND_OP_CMD_INSTR, \
.optional = _opt, \
}
#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
{ \
.type = NAND_OP_ADDR_INSTR, \
.optional = _opt, \
.ctx.addr.maxcycles = _maxcycles, \
}
#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
{ \
.type = NAND_OP_DATA_IN_INSTR, \
.optional = _opt, \
.ctx.data.maxlen = _maxlen, \
}
#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
{ \
.type = NAND_OP_DATA_OUT_INSTR, \
.optional = _opt, \
.ctx.data.maxlen = _maxlen, \
}
#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
{ \
.type = NAND_OP_WAITRDY_INSTR, \
.optional = _opt, \
}
/**
* struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
* @elems: array of pattern elements
* @nelems: number of pattern elements in @elems array
* @exec: the function that will issue a sub-operation
*
* A pattern is a list of elements, each element representing one instruction
* with its constraints. The pattern itself is used by the core to match NAND
* chip operation with NAND controller operations.
* Once a match between a NAND controller operation pattern and a NAND chip
* operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
* hook is called so that the controller driver can issue the operation on the
* bus.
*
* Controller drivers should declare as many patterns as they support and pass
* this list of patterns (created with the help of the following macro) to
* the nand_op_parser_exec_op() helper.
*/
struct nand_op_parser_pattern {
const struct nand_op_parser_pattern_elem *elems;
unsigned int nelems;
int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
};
#define NAND_OP_PARSER_PATTERN(_exec, ...) \
{ \
.exec = _exec, \
.elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
sizeof(struct nand_op_parser_pattern_elem), \
}
/**
* struct nand_op_parser - NAND controller operation parser descriptor
* @patterns: array of supported patterns
* @npatterns: length of the @patterns array
*
* The parser descriptor is just an array of supported patterns which will be
* iterated by nand_op_parser_exec_op() every time it tries to execute a
* NAND operation (or tries to determine if a specific operation is supported).
*
* It is worth mentioning that patterns will be tested in their declaration
* order, and the first match will be taken, so it's important to order patterns
* appropriately so that simple/inefficient patterns are placed at the end of
* the list. Usually, this is where you put single instruction patterns.
*/
struct nand_op_parser {
const struct nand_op_parser_pattern *patterns;
unsigned int npatterns;
};
#define NAND_OP_PARSER(...) \
{ \
.patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
sizeof(struct nand_op_parser_pattern), \
}
/**
* struct nand_operation - NAND operation descriptor
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
* The actual operation structure that will be passed to chip->exec_op().
*/
struct nand_operation {
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
#define NAND_OPERATION(_instrs) \
{ \
.instrs = _instrs, \
.ninstrs = ARRAY_SIZE(_instrs), \
}
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
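/*
 * Usage sketch (hypothetical "foo" controller, not part of this patch): the
 * pattern macros above describe what the controller can do in one step, and
 * nand_op_parser_exec_op() splits incoming operations accordingly. The
 * pattern layout and the constraint values (5 address cycles, 4096-byte data
 * transfers) are purely illustrative.
 */
static int foo_exec_pattern(struct nand_chip *chip,
			    const struct nand_subop *subop)
{
	/* Program the controller and issue the matched sub-operation here. */
	return 0;
}

static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(foo_exec_pattern,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));

static int foo_exec_op(struct nand_chip *chip,
		       const struct nand_operation *op, bool check_only)
{
	/* Let the core match @op against the declared patterns. */
	return nand_op_parser_exec_op(chip, &foo_op_parser, op, check_only);
}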
/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops {
* commands to the chip.
* @waitfunc: [REPLACEABLE] hardware-specific function for waiting until the
* chip is ready.
* @exec_op: controller specific method to execute NAND operations.
* This method replaces ->cmdfunc(),
* ->{read,write}_{buf,byte,word}(), ->dev_ready() and
* ->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buffers: buffer structure for read/write
* @buf_align: minimum buffer alignment required by a platform
* @hwcontrol: platform-specific hardware control structure
* @erase: [REPLACEABLE] erase function
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops {
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
* @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
* @pagebuf: [INTERN] holds the pagenumber which is currently in
* data_buf.
* @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
@@ -886,6 +1207,9 @@ struct nand_chip {
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
int (*exec_op)(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -896,7 +1220,6 @@ struct nand_chip {
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -908,6 +1231,7 @@ struct nand_chip {
int numchips;
uint64_t chipsize;
int pagemask;
u8 *data_buf;
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
@@ -928,7 +1252,7 @@ struct nand_chip {
u16 max_bb_per_die;
u32 blocks_per_die;
struct nand_data_interface *data_interface;
struct nand_data_interface data_interface;
int read_retries;
@@ -938,7 +1262,6 @@ struct nand_chip {
struct nand_hw_control *controller;
struct nand_ecc_ctrl ecc;
struct nand_buffers *buffers;
unsigned long buf_align;
struct nand_hw_control hwcontrol;
@@ -956,6 +1279,15 @@ struct nand_chip {
} manufacturer;
};
static inline int nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op)
{
if (!chip->exec_op)
return -ENOTSUPP;
return chip->exec_op(chip, op, false);
}
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
int onfi_init_data_interface(struct nand_chip *chip,
struct nand_data_interface *iface,
int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode);
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip)
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
/* get data interface from ONFI timing mode 0, used after reset. */
const struct nand_data_interface *nand_get_default_data_interface(void);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
/* NAND operation helpers */
int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
int nand_change_read_column_op(struct nand_chip *chip,
unsigned int offset_in_page, void *buf,
unsigned int len, bool force_8bit);
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len);
int nand_prog_page_end_op(struct nand_chip *chip);
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len);
int nand_change_write_column_op(struct nand_chip *chip,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
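/*
 * Sketch (not part of this patch) of how these helpers are meant to be
 * chained: read the main data area of a page, then keep clocking out the
 * OOB bytes that follow it. The function name and parameters are
 * illustrative only.
 */
static int example_read_page_and_oob(struct nand_chip *chip, unsigned int page,
				     void *databuf, unsigned int datalen,
				     void *oobbuf, unsigned int ooblen)
{
	int ret;

	/* READ0 + address cycles, then output of the main data area */
	ret = nand_read_page_op(chip, page, 0, databuf, datalen);
	if (ret)
		return ret;

	/* The read pointer is now at the OOB area; continue reading */
	return nand_read_data_op(chip, oobbuf, ooblen, false);
}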
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
#endif /* __LINUX_MTD_RAWNAND_H */

View File

@@ -61,6 +61,7 @@
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -130,7 +131,10 @@
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
/* Flag Status Register bits */
#define FSR_READY BIT(7)
#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
#define FSR_E_ERR BIT(5) /* Erase operation status */
#define FSR_P_ERR BIT(4) /* Program operation status */
#define FSR_PT_ERR BIT(1) /* Protection error bit */
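/*
 * Illustration only (not part of this patch): decode a Flag Status Register
 * value previously read with SPINOR_OP_RDFSR. The error-code mapping is a
 * plausible choice, not mandated by the flash datasheet.
 */
static int example_fsr_to_errno(u8 fsr)
{
	if (fsr & (FSR_E_ERR | FSR_P_ERR))
		return -EIO;	/* erase or program operation failed */
	if (fsr & FSR_PT_ERR)
		return -EACCES;	/* attempted to modify a protected sector */

	return (fsr & FSR_READY) ? 0 : -EBUSY;
}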
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps {
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
/**
* spi_nor_restore_addr_mode() - restore the status of SPI NOR
* @nor: the spi_nor structure
*/
void spi_nor_restore(struct spi_nor *nor);
#endif

View File

@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds the NFNL subsystem mutex.
* the READ_ONCE(), because caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))

View File

@@ -85,7 +85,7 @@ struct netlink_ext_ack {
* to the lack of an output buffer.)
*/
#define NL_SET_ERR_MSG(extack, msg) do { \
static const char __msg[] = (msg); \
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) \
@@ -101,7 +101,7 @@ struct netlink_ext_ack {
} while (0)
#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
static const char __msg[] = (msg); \
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) { \

View File

@@ -5,20 +5,36 @@
Originally written by Alan Cox.
Hacked to death by C. Scott Ananian and David Huggins-Daines.
Some of the constants in here are from the corresponding
NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
rest of them on our own. */
*/
#ifndef LINUX_NUBUS_H
#define LINUX_NUBUS_H
#include <linux/device.h>
#include <asm/nubus.h>
#include <uapi/linux/nubus.h>
struct proc_dir_entry;
struct seq_file;
struct nubus_dir {
unsigned char *base;
unsigned char *ptr;
int done;
int mask;
struct proc_dir_entry *procdir;
};
struct nubus_dirent {
unsigned char *base;
unsigned char type;
__u32 data; /* Actually 24 bits used */
int mask;
};
struct nubus_board {
struct nubus_board* next;
struct nubus_dev* first_dev;
struct device dev;
/* Only 9-E actually exist, though 0-8 are also theoretically
possible, and 0 is a special case which represents the
motherboard and onboard peripherals (Ethernet, video) */
@@ -27,10 +43,10 @@ struct nubus_board {
char name[64];
/* Format block */
unsigned char* fblock;
unsigned char *fblock;
/* Root directory (does *not* always equal fblock + doffset!) */
unsigned char* directory;
unsigned char *directory;
unsigned long slot_addr;
/* Offset to root directory (sometimes) */
unsigned long doffset;
@@ -41,15 +57,15 @@ struct nubus_board {
unsigned char rev;
unsigned char format;
unsigned char lanes;
/* Directory entry in /proc/bus/nubus */
struct proc_dir_entry *procdir;
};
struct nubus_dev {
/* Next link in device list */
struct nubus_dev* next;
/* Directory entry in /proc/bus/nubus */
struct proc_dir_entry* procdir;
struct nubus_rsrc {
struct list_head list;
/* The functional resource ID of this device */
/* The functional resource ID */
unsigned char resid;
/* These are mostly here for convenience; we could always read
them from the ROMs if we wanted to */
@@ -57,79 +73,116 @@ struct nubus_dev {
unsigned short type;
unsigned short dr_sw;
unsigned short dr_hw;
/* This is the device's name rather than the board's.
Sometimes they are different. Usually the board name is
more correct. */
char name[64];
/* MacOS driver (I kid you not) */
unsigned char* driver;
/* Actually this is an offset */
unsigned long iobase;
unsigned long iosize;
unsigned char flags, hwdevid;
/* Functional directory */
unsigned char* directory;
unsigned char *directory;
/* Much of our info comes from here */
struct nubus_board* board;
struct nubus_board *board;
};
/* This is all NuBus devices (used to find devices later on) */
extern struct nubus_dev* nubus_devices;
/* This is all NuBus cards */
extern struct nubus_board* nubus_boards;
/* This is all NuBus functional resources (used to find devices later on) */
extern struct list_head nubus_func_rsrcs;
struct nubus_driver {
struct device_driver driver;
int (*probe)(struct nubus_board *board);
int (*remove)(struct nubus_board *board);
};
extern struct bus_type nubus_bus_type;
/* Generic NuBus interface functions, modelled after the PCI interface */
void nubus_scan_bus(void);
#ifdef CONFIG_PROC_FS
extern void nubus_proc_init(void);
void nubus_proc_init(void);
struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent,
struct nubus_board *board);
void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent,
unsigned int size);
void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent);
#else
static inline void nubus_proc_init(void) {}
static inline
struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{ return NULL; }
static inline
struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent,
struct nubus_board *board)
{ return NULL; }
static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent,
unsigned int size) {}
static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent) {}
#endif
int get_nubus_list(char *buf);
int nubus_proc_attach_device(struct nubus_dev *dev);
/* If we need more precision we can add some more of these */
struct nubus_dev* nubus_find_device(unsigned short category,
unsigned short type,
unsigned short dr_hw,
unsigned short dr_sw,
const struct nubus_dev* from);
struct nubus_dev* nubus_find_type(unsigned short category,
unsigned short type,
const struct nubus_dev* from);
/* Might have more than one device in a slot, you know... */
struct nubus_dev* nubus_find_slot(unsigned int slot,
const struct nubus_dev* from);
struct nubus_rsrc *nubus_first_rsrc_or_null(void);
struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
#define for_each_func_rsrc(f) \
for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
#define for_each_board_func_rsrc(b, f) \
for_each_func_rsrc(f) if (f->board != b) {} else
/* These are somewhat more NuBus-specific. They all return 0 for
success and -1 for failure, as you'd expect. */
/* The root directory which contains the board and functional
directories */
int nubus_get_root_dir(const struct nubus_board* board,
struct nubus_dir* dir);
int nubus_get_root_dir(const struct nubus_board *board,
struct nubus_dir *dir);
/* The board directory */
int nubus_get_board_dir(const struct nubus_board* board,
struct nubus_dir* dir);
int nubus_get_board_dir(const struct nubus_board *board,
struct nubus_dir *dir);
/* The functional directory */
int nubus_get_func_dir(const struct nubus_dev* dev,
struct nubus_dir* dir);
int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
/* These work on any directory gotten via the above */
int nubus_readdir(struct nubus_dir* dir,
struct nubus_dirent* ent);
int nubus_find_rsrc(struct nubus_dir* dir,
int nubus_readdir(struct nubus_dir *dir,
struct nubus_dirent *ent);
int nubus_find_rsrc(struct nubus_dir *dir,
unsigned char rsrc_type,
struct nubus_dirent* ent);
int nubus_rewinddir(struct nubus_dir* dir);
struct nubus_dirent *ent);
int nubus_rewinddir(struct nubus_dir *dir);
/* Things to do with directory entries */
int nubus_get_subdir(const struct nubus_dirent* ent,
struct nubus_dir* dir);
void nubus_get_rsrc_mem(void* dest,
const struct nubus_dirent *dirent,
int len);
void nubus_get_rsrc_str(void* dest,
const struct nubus_dirent *dirent,
int maxlen);
int nubus_get_subdir(const struct nubus_dirent *ent,
struct nubus_dir *dir);
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
unsigned int len);
unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
unsigned int len);
void nubus_seq_write_rsrc_mem(struct seq_file *m,
const struct nubus_dirent *dirent,
unsigned int len);
unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
/* Declarations relating to driver model objects */
int nubus_bus_register(void);
int nubus_device_register(struct nubus_board *board);
int nubus_driver_register(struct nubus_driver *ndrv);
void nubus_driver_unregister(struct nubus_driver *ndrv);
int nubus_proc_show(struct seq_file *m, void *data);
static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
{
dev_set_drvdata(&board->dev, data);
}
static inline void *nubus_get_drvdata(struct nubus_board *board)
{
return dev_get_drvdata(&board->dev);
}
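/*
 * Hypothetical skeleton (not part of this patch) of a board driver built on
 * the new NuBus driver model: walk the board's functional resources, stash a
 * pointer as driver data, and register with the bus. All "example_*" names
 * are placeholders.
 */
static int example_nubus_probe(struct nubus_board *board)
{
	struct nubus_rsrc *fres;

	/* Remember the first functional resource found on this board. */
	for_each_board_func_rsrc(board, fres) {
		nubus_set_drvdata(board, fres);
		return 0;
	}
	return -ENODEV;
}

static int example_nubus_remove(struct nubus_board *board)
{
	return 0;
}

static struct nubus_driver example_nubus_driver = {
	.driver = {
		.name = "example",
	},
	.probe = example_nubus_probe,
	.remove = example_nubus_remove,
};

/* Registered from the module init path with nubus_driver_register(). */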
/* Returns a pointer to the "standard" slot space. */
static inline void *nubus_slot_addr(int slot)
{
return (void *)(0xF0000000 | (slot << 24));
}
#endif /* LINUX_NUBUS_H */

View File

@@ -124,14 +124,20 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
enum {
NVME_CMBSZ_SQS = 1 << 0,
NVME_CMBSZ_CQS = 1 << 1,
NVME_CMBSZ_LISTS = 1 << 2,
NVME_CMBSZ_RDS = 1 << 3,
NVME_CMBSZ_WDS = 1 << 4,
NVME_CMBSZ_SZ_SHIFT = 12,
NVME_CMBSZ_SZ_MASK = 0xfffff,
NVME_CMBSZ_SZU_SHIFT = 8,
NVME_CMBSZ_SZU_MASK = 0xf,
};
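/*
 * Sketch (not part of this patch): combine the CMBSZ fields into a size in
 * bytes. Per my reading of the NVMe spec, SZU selects the size unit
 * (0 = 4 KiB, each increment multiplies the unit by 16), so the unit is
 * 1 << (12 + 4 * SZU).
 */
static inline u64 example_cmb_size_bytes(u32 cmbsz)
{
	u32 szu = (cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
	u64 sz = (cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;

	/* SZU values above 6 are reserved; treat them as invalid. */
	if (szu > 6)
		return 0;

	return sz << (12 + 4 * szu);
}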
/*
* Submission and Completion Queue Entry Sizes for the NVM command set.

View File

@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
struct gpmc_nand_regs;
struct gpmc_onenand_info {
bool sync_read;
bool sync_write;
int burst_len;
};
#if IS_ENABLED(CONFIG_OMAP_GPMC)
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
/**
* gpmc_omap_onenand_set_timings - set optimized sync timings.
* @cs: Chip Select Region
* @freq: Chip frequency
* @latency: Burst latency cycle count
* @info: Structure describing parameters used
*
* Sets optimized timings for the @cs region based on @freq and @latency.
* Updates the @info structure based on the GPMC settings.
*/
int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
int latency,
struct gpmc_onenand_info *info);
#else
static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs)
{
return NULL;
}
static inline
int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
int latency,
struct gpmc_onenand_info *info)
{
return -EINVAL;
}
#endif /* CONFIG_OMAP_GPMC */
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,

View File

@@ -66,6 +66,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
return tsk->signal->oom_mm;
}
/*
* Use this helper if tsk->mm != mm and the victim mm needs special
* handling. This is guaranteed to stay true once set.
*/
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
return test_bit(MMF_OOM_VICTIM, &mm->flags);
}
/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the

View File

@@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
unsigned int devfn)
{ return NULL; }
static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
unsigned int bus, unsigned int devfn)
{ return NULL; }
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }

View File

@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
*
* The smp_read_barrier_depends() implied by READ_ONCE() pairs
* with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
/* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
smp_read_barrier_depends();
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be

View File

@@ -1,34 +0,0 @@
/*
* Copyright (C) 2006 Nokia Corporation
* Author: Juha Yrjola
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __MTD_ONENAND_OMAP2_H
#define __MTD_ONENAND_OMAP2_H
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#define ONENAND_SYNC_READ (1 << 0)
#define ONENAND_SYNC_READWRITE (1 << 1)
#define ONENAND_IN_OMAP34XX (1 << 2)
struct omap_onenand_platform_data {
int cs;
int gpio_irq;
struct mtd_partition *parts;
int nr_parts;
int (*onenand_setup)(void __iomem *, int *freq_ptr);
int dma_channel;
u8 flags;
u8 regulator_can_sleep;
u8 skip_initial_unlocking;
/* for passing the partitions */
struct device_node *of_node;
};
#endif

View File

@@ -1,10 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __SPI_S3C64XX_H

View File

@@ -556,9 +556,10 @@ struct pm_subsys_data {
* These flags can be set by device drivers at probe time. They need not be
* cleared by the drivers as the driver core will take care of that.
*
* NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
* NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
* SMART_PREPARE: Check the return value of the driver's ->prepare callback.
* SMART_SUSPEND: No need to resume the device from runtime suspend.
* LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
*
* Setting SMART_PREPARE instructs bus types and PM domains which may want
* system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
* necessary from the driver's perspective. It also may cause them to skip
* invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
* the driver if they decide to leave the device in runtime suspend.
*
* Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
* driver prefers the device to be left in suspend after system resume.
*/
#define DPM_FLAG_NEVER_SKIP BIT(0)
#define DPM_FLAG_SMART_PREPARE BIT(1)
#define DPM_FLAG_SMART_SUSPEND BIT(2)
#define DPM_FLAG_NEVER_SKIP BIT(0)
#define DPM_FLAG_SMART_PREPARE BIT(1)
#define DPM_FLAG_SMART_SUSPEND BIT(2)
#define DPM_FLAG_LEAVE_SUSPENDED BIT(3)
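/*
 * Usage sketch (not part of this patch): a driver that wants to stay
 * runtime-suspended across system suspend and to be left suspended after
 * system resume would typically set its flags at probe time. The probe
 * function below is hypothetical and assumes the dev_pm_set_driver_flags()
 * helper is available for storing the flags.
 */
static int example_pm_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);
	return 0;
}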
struct dev_pm_info {
pm_message_t power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
unsigned int must_resume:1; /* Owned by the PM core */
unsigned int may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
#endif
@@ -765,6 +772,8 @@ extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void dev_pm_skip_next_resume_phases(struct device *dev);
extern bool dev_pm_may_skip_resume(struct device *dev);
extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
#else /* !CONFIG_PM_SLEEP */

View File

@@ -88,6 +88,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
static inline void device_set_wakeup_path(struct device *dev)
{
dev->power.wakeup_path = true;
}
/* drivers/base/power/wakeup.c */
extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
@@ -174,6 +179,8 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && dev->power.should_wakeup;
}
static inline void device_set_wakeup_path(struct device *dev) {}
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}

View File

@@ -42,13 +42,26 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
#define MAKE_THREAD_CPUCLOCK(tid, clock) \
MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
{
return ((~pid) << 3) | clock;
}
static inline clockid_t make_thread_cpuclock(const unsigned int tid,
const clockid_t clock)
{
return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
}
#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
static inline clockid_t fd_to_clockid(const int fd)
{
return make_process_cpuclock((unsigned int) fd, CLOCKFD);
}
static inline int clockid_to_fd(const clockid_t clk)
{
return ~(clk >> 3);
}
#define REQUEUE_PENDING 1

View File

@@ -1,43 +1,11 @@
/*
* Copyright (C) Intel 2011
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
* (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
* compact JTAG, standard.
*
* This header file will allow other parts of the OS to use the
* interface to write out it's contents for debugging a mobile system.
*/
// SPDX-License-Identifier: GPL-2.0
#ifndef _INCLUDE_PTI_H
#define _INCLUDE_PTI_H
#ifndef PTI_H_
#define PTI_H_
#ifdef CONFIG_PAGE_TABLE_ISOLATION
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
#endif
/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
#define PTI_LASTDWORD_DTS 0x30
/* basic structure used as a write address to the PTI HW */
struct pti_masterchannel {
u8 master;
u8 channel;
};
/* the following functions are defined in misc/pti.c */
void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
struct pti_masterchannel *pti_request_masterchannel(u8 type,
const char *thread_name);
void pti_release_masterchannel(struct pti_masterchannel *mc);
#endif /*PTI_H_*/
#endif

View File

@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must hold producer_lock.
* Callers are responsible for making sure the pointer being queued
* points to valid data.
*/
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
if (unlikely(!r->size) || r->queue[r->producer])
return -ENOSPC;
/* Make sure the pointer we are storing points to valid data. */
/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
smp_wmb();
r->queue[r->producer++] = ptr;
if (unlikely(r->producer >= r->size))
r->producer = 0;
@@ -168,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
* if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
* If ring is never resized, and if the pointer is merely
* tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
* However, if called outside the lock, and if some other CPU
* consumes ring entries at the same time, the value returned
* is not guaranteed to be correct.
* In this case - to avoid incorrectly detecting the ring
* as empty - the CPU consuming the ring entries is responsible
* for either consuming all ring entries until the ring is empty,
* or synchronizing with some other CPU and causing it to
* execute __ptr_ring_peek and/or consume the ring entries
* after the synchronization point.
*/
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
@@ -176,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
return NULL;
}
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must take consumer_lock
* if the ring is ever resized - see e.g. ptr_ring_empty.
*/
/* See __ptr_ring_peek above for locking rules. */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
return !__ptr_ring_peek(r);
@@ -275,6 +287,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
if (ptr)
__ptr_ring_discard_one(r);
/* Make sure anyone accessing data through the pointer is up to date. */
/* Pairs with smp_wmb in __ptr_ring_produce. */
smp_read_barrier_depends();
return ptr;
}
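/*
 * Usage sketch (not part of this patch) built on the locked wrappers, which
 * take care of the producer/consumer ordering discussed above. The ring size
 * and payload are illustrative.
 */
static void example_ptr_ring_usage(void)
{
	struct ptr_ring ring;
	static int item = 42;
	void *p;

	if (ptr_ring_init(&ring, 16, GFP_KERNEL))
		return;

	if (!ptr_ring_produce(&ring, &item)) {	/* returns 0 on success */
		p = ptr_ring_consume(&ring);	/* NULL when the ring is empty */
		WARN_ON(p != &item);
	}

	ptr_ring_cleanup(&ring, NULL);
}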

View File

@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)

View File

@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { }
#define cond_resched_rcu_qs() \
do { \
if (!cond_resched()) \
rcu_note_voluntary_context_switch(current); \
rcu_note_voluntary_context_switch_lite(current); \
} while (0)
/*
@@ -433,12 +433,12 @@ static inline void rcu_preempt_sleep_check(void) { }
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
* smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
* update-side locks prevent the value of the pointer from changing, you
* should instead use rcu_dereference_protected() for this use case.
* lockdep checks for being in an RCU read-side critical section. This is
* useful when the value of this pointer is accessed, but the pointer is
* not dereferenced, for example, when testing an RCU-protected pointer
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as
@@ -521,12 +521,11 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does *not*
* prevent the compiler from repeating this reference or combining it
* with other references, so it should not be used without protection
* of appropriate locks.
* the READ_ONCE(). This is useful in cases where update-side locks
* prevent the value of the pointer from changing. Please note that this
* primitive does *not* prevent the compiler from repeating this reference
* or combining it with other references, so it should not be used without
* protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent

View File

@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }

View File

@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
bool rcu_irq_enter_disabled(void);
void exit_rcu(void);

View File

@@ -30,6 +30,7 @@ struct regmap;
struct regmap_range_cfg;
struct regmap_field;
struct snd_ac97;
struct sdw_slave;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -264,6 +265,9 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
* @disable_locking: This regmap is either protected by external means or
* is guaranteed not to be accessed from multiple threads.
* Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
* @unlock: As above for unlocking.
@@ -296,7 +300,10 @@ typedef void (*regmap_unlock)(void *);
* a read.
* @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
* empty the regmap_bus default masks are used.
* empty and zero_flag_mask is not set the regmap_bus default
* masks are used.
* @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
* if they are both empty.
* @use_single_rw: If set, converts the bulk read and write operations into
* a series of single read and write operations. This is useful
* for devices that do not support bulk read and write.
@@ -317,6 +324,7 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
* @use_hwlock: Indicate if a hardware spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
@@ -333,6 +341,8 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
bool disable_locking;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg;
@@ -355,6 +365,7 @@ struct regmap_config {
unsigned long read_flag_mask;
unsigned long write_flag_mask;
bool zero_flag_mask;
bool use_single_rw;
bool can_multi_write;
@@ -365,6 +376,7 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
bool use_hwlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
};
@@ -524,6 +536,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -561,6 +577,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -709,6 +729,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
ac97, config)
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
* regmap_init_sdw() - Initialise register map
*
* @sdw: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
#define regmap_init_sdw(sdw, config) \
__regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
sdw, config)
/**
* devm_regmap_init() - Initialise managed register map
*
@@ -839,6 +873,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
ac97, config)
/**
* devm_regmap_init_sdw() - Initialise managed register map
*
* @sdw: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
#define devm_regmap_init_sdw(sdw, config) \
__regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
sdw, config)
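/*
 * Usage sketch (hypothetical SoundWire codec, not part of this patch): the
 * register widths, max_register and cache type below are illustrative only.
 */
static const struct regmap_config example_sdw_regmap_config = {
	.reg_bits = 32,
	.val_bits = 8,
	.max_register = 0x1000,
	.cache_type = REGCACHE_RBTREE,
};

static int example_sdw_setup_regmap(struct sdw_slave *slave)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_sdw(slave, &example_sdw_regmap_config);

	return PTR_ERR_OR_ZERO(regmap);
}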
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);

View File

@@ -214,6 +214,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
int (*resume_early)(struct regulator_dev *rdev);
int (*set_pull_down) (struct regulator_dev *);
};

View File

@@ -42,6 +42,16 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
/*
* operations in suspend mode
* DO_NOTHING_IN_SUSPEND - the default value
* DISABLE_IN_SUSPEND - turn off regulator in suspend states
* ENABLE_IN_SUSPEND - keep regulator on in suspend states
*/
#define DO_NOTHING_IN_SUSPEND (-1)
#define DISABLE_IN_SUSPEND 0
#define ENABLE_IN_SUSPEND 1
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +66,24 @@ enum regulator_active_discharge {
* state. One of enabled or disabled must be set for the
* configuration to be applied.
*
* @uV: Operating voltage during suspend.
* @uV: Default operating voltage during suspend; it can be adjusted
* within the <min_uV, max_uV> range.
* @min_uV: Minimum suspend voltage may be set.
* @max_uV: Maximum suspend voltage may be set.
* @mode: Operating mode during suspend.
* @enabled: Enabled during suspend.
* @disabled: Disabled during suspend.
* @enabled: operation to perform during suspend, one of:
* - DO_NOTHING_IN_SUSPEND
* - DISABLE_IN_SUSPEND
* - ENABLE_IN_SUSPEND
* @changeable: Indicates whether this state can be switched between enabled and disabled.
*/
struct regulator_state {
int uV; /* suspend voltage */
unsigned int mode; /* suspend regulator operating mode */
int enabled; /* is regulator enabled in this suspend state */
int disabled; /* is the regulator disabled in this suspend state */
int uV;
int min_uV;
int max_uV;
unsigned int mode;
int enabled;
bool changeable;
};
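/*
 * Board-constraints sketch (not part of this patch): keep a rail powered at
 * a fixed voltage during suspend-to-RAM. This assumes the usual
 * regulator_init_data/regulation_constraints layout with a state_mem member;
 * the voltage value is illustrative.
 */
static struct regulator_init_data example_rail_init_data = {
	.constraints = {
		.state_mem = {
			.uV = 1800000,
			.enabled = ENABLE_IN_SUSPEND,
		},
	},
};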
/**
@@ -225,12 +243,12 @@ struct regulator_init_data {
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
int regulator_suspend_prepare(suspend_state_t state);
int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
#endif
static inline int regulator_suspend_prepare(suspend_state_t state)
{
return 0;
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void)
{
return 0;
}
#endif
#endif

View File

@@ -70,8 +70,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds RTNL.
* the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())

View File

@@ -24,7 +24,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mfd/rtsx_common.h>
#include <linux/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -203,6 +203,7 @@
#define SD_DDR_MODE 0x04
#define SD_30_MODE 0x08
#define SD_CLK_DIVIDE_MASK 0xC0
#define SD_MODE_SELECT_MASK 0x0C
#define SD_CFG2 0xFDA1
#define SD_CALCULATE_CRC7 0x00
#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +227,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
#define SD30_CLK_END_EN 0x10
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
#define SD_STAT1 0xFDA3
@@ -309,6 +311,12 @@
#define SD_DATA_STATE 0xFDB6
#define SD_DATA_IDLE 0x80
#define REG_SD_STOP_SDCLK_CFG 0xFDB8
#define SD30_CLK_STOP_CFG_EN 0x04
#define SD30_CLK_STOP_CFG1 0x02
#define SD30_CLK_STOP_CFG0 0x01
#define REG_PRE_RW_MODE 0xFD70
#define EN_INFINITE_MODE 0x01
#define SRCTL 0xFC13
@@ -434,6 +442,7 @@
#define CARD_CLK_EN 0xFD69
#define SD_CLK_EN 0x04
#define MS_CLK_EN 0x08
#define SD40_CLK_EN 0x10
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
#define CD_DISABLE_MASK 0x07
@@ -453,8 +462,8 @@
#define FPDCTL 0xFC00
#define SSC_POWER_DOWN 0x01
#define SD_OC_POWER_DOWN 0x02
#define ALL_POWER_DOWN 0x07
#define OC_POWER_DOWN 0x06
#define ALL_POWER_DOWN 0x03
#define OC_POWER_DOWN 0x02
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
@@ -490,6 +499,9 @@
#define FPGA_PULL_CTL 0xFC1D
#define OLT_LED_CTL 0xFC1E
#define LED_SHINE_MASK 0x08
#define LED_SHINE_EN 0x08
#define LED_SHINE_DISABLE 0x00
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
@@ -511,7 +523,11 @@
#define BPP_LDO_ON 0x00
#define BPP_LDO_SUSPEND 0x02
#define BPP_LDO_OFF 0x03
#define EFUSE_CTL 0xFC30
#define EFUSE_ADD 0xFC31
#define SYS_VER 0xFC32
#define EFUSE_DATAL 0xFC34
#define EFUSE_DATAH 0xFC35
#define CARD_PULL_CTL1 0xFD60
#define CARD_PULL_CTL2 0xFD61
@@ -553,6 +569,9 @@
#define RBBC1 0xFE2F
#define RBDAT 0xFE30
#define RBCTL 0xFE34
#define U_AUTO_DMA_EN_MASK 0x20
#define U_AUTO_DMA_DISABLE 0x00
#define RB_FLUSH 0x80
#define CFGADDR0 0xFE35
#define CFGADDR1 0xFE36
#define CFGDATA0 0xFE37
@@ -581,6 +600,8 @@
#define LTR_LATENCY_MODE_HW 0
#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
#define OBFF_EN_MASK 0x03
#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -595,6 +616,7 @@
#define FORCE_ASPM_L0_EN 0x01
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
#define CLK_PM_EN 0x01
#define FUNC_FORCE_CTL 0xFE59
#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -620,14 +642,23 @@
#define LDO_PWR_SEL 0xFE78
#define L1SUB_CONFIG1 0xFE8D
#define AUX_CLK_ACTIVE_SEL_MASK 0x01
#define MAC_CKSW_DONE 0x00
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
#define IC_VERSION_MASK 0x0F
#define REG_VREF 0xFE97
#define PWD_SUSPND_EN 0x10
#define RTS5260_DMA_RST_CTL_0 0xFEBF
#define RTS5260_DMA_RST 0x80
#define RTS5260_ADMA3_RST 0x40
#define AUTOLOAD_CFG_BASE 0xFF00
#define RELINK_TIME_MASK 0x01
#define PETXCFG 0xFF03
#define FORCE_CLKREQ_DELINK_MASK BIT(7)
#define FORCE_CLKREQ_LOW 0x80
@@ -667,15 +698,24 @@
#define LDO_DV18_CFG 0xFF70
#define LDO_DV18_SR_MASK 0xC0
#define LDO_DV18_SR_DF 0x40
#define DV331812_MASK 0x70
#define DV331812_33 0x70
#define DV331812_17 0x30
#define LDO_CONFIG2 0xFF71
#define LDO_D3318_MASK 0x07
#define LDO_D3318_33V 0x07
#define LDO_D3318_18V 0x02
#define DV331812_VDD1 0x04
#define DV331812_POWERON 0x08
#define DV331812_POWEROFF 0x00
#define LDO_VCC_CFG0 0xFF72
#define LDO_VCC_LMTVTH_MASK 0x30
#define LDO_VCC_LMTVTH_2A 0x10
/*RTS5260*/
#define RTS5260_DVCC_TUNE_MASK 0x70
#define RTS5260_DVCC_33 0x70
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -684,6 +724,10 @@
#define LDO_VCC_1V8 0x04
#define LDO_VCC_3V3 0x07
#define LDO_VCC_LMT_EN 0x08
/*RTS5260*/
#define LDO_POW_SDVDD1_MASK 0x08
#define LDO_POW_SDVDD1_ON 0x08
#define LDO_POW_SDVDD1_OFF 0x00
#define LDO_VIO_CFG 0xFF75
#define LDO_VIO_SR_MASK 0xC0
@@ -711,6 +755,160 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
#define RTS5260_AUTOLOAD_CFG4 0xFF7F
#define RTS5260_MIMO_DISABLE 0x8A
#define RTS5260_REG_GPIO_CTL0 0xFC1A
#define RTS5260_REG_GPIO_MASK 0x01
#define RTS5260_REG_GPIO_ON 0x01
#define RTS5260_REG_GPIO_OFF 0x00
#define PWR_GLOBAL_CTRL 0xF200
#define PCIE_L1_2_EN 0x0C
#define PCIE_L1_1_EN 0x0A
#define PCIE_L1_0_EN 0x09
#define PWR_FE_CTL 0xF201
#define PCIE_L1_2_PD_FE_EN 0x0C
#define PCIE_L1_1_PD_FE_EN 0x0A
#define PCIE_L1_0_PD_FE_EN 0x09
#define CFG_PCIE_APHY_OFF_0 0xF204
#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
#define CFG_PCIE_APHY_OFF_1 0xF205
#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
#define CFG_PCIE_APHY_OFF_2 0xF206
#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
#define CFG_PCIE_APHY_OFF_3 0xF207
#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
#define CFG_L1_0_SYS_RET_VALUE 0xF210
#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
#define CFG_LP_FPWM_VALUE 0xF219
#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
#define PWC_CDR 0xF253
#define PWC_CDR_DEFAULT 0x03
#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
/* OCPCTL */
#define SD_DETECT_EN 0x08
#define SD_OCP_INT_EN 0x04
#define SD_OCP_INT_CLR 0x02
#define SD_OC_CLR 0x01
#define SDVIO_DETECT_EN (1 << 7)
#define SDVIO_OCP_INT_EN (1 << 6)
#define SDVIO_OCP_INT_CLR (1 << 5)
#define SDVIO_OC_CLR (1 << 4)
/* OCPSTAT */
#define SD_OCP_DETECT 0x08
#define SD_OC_NOW 0x04
#define SD_OC_EVER 0x02
#define SDVIO_OC_NOW (1 << 6)
#define SDVIO_OC_EVER (1 << 5)
#define REG_OCPCTL 0xFD6A
#define REG_OCPSTAT 0xFD6E
#define REG_OCPGLITCH 0xFD6C
#define REG_OCPPARA1 0xFD6B
#define REG_OCPPARA2 0xFD6D
/* rts5260 DV3318 OCP-related registers */
#define REG_DV3318_OCPCTL 0xFD89
#define DV3318_OCP_TIME_MASK 0xF0
#define DV3318_DETECT_EN 0x08
#define DV3318_OCP_INT_EN 0x04
#define DV3318_OCP_INT_CLR 0x02
#define DV3318_OCP_CLR 0x01
#define REG_DV3318_OCPSTAT 0xFD8A
#define DV3318_OCP_GlITCH_TIME_MASK 0xF0
#define DV3318_OCP_DETECT 0x08
#define DV3318_OCP_NOW 0x04
#define DV3318_OCP_EVER 0x02
#define SD_OCP_GLITCH_MASK 0x0F
/* OCPPARA1 */
#define SDVIO_OCP_TIME_60 0x00
#define SDVIO_OCP_TIME_100 0x10
#define SDVIO_OCP_TIME_200 0x20
#define SDVIO_OCP_TIME_400 0x30
#define SDVIO_OCP_TIME_600 0x40
#define SDVIO_OCP_TIME_800 0x50
#define SDVIO_OCP_TIME_1100 0x60
#define SDVIO_OCP_TIME_MASK 0x70
#define SD_OCP_TIME_60 0x00
#define SD_OCP_TIME_100 0x01
#define SD_OCP_TIME_200 0x02
#define SD_OCP_TIME_400 0x03
#define SD_OCP_TIME_600 0x04
#define SD_OCP_TIME_800 0x05
#define SD_OCP_TIME_1100 0x06
#define SD_OCP_TIME_MASK 0x07
/* OCPPARA2 */
#define SDVIO_OCP_THD_190 0x00
#define SDVIO_OCP_THD_250 0x10
#define SDVIO_OCP_THD_320 0x20
#define SDVIO_OCP_THD_380 0x30
#define SDVIO_OCP_THD_440 0x40
#define SDVIO_OCP_THD_500 0x50
#define SDVIO_OCP_THD_570 0x60
#define SDVIO_OCP_THD_630 0x70
#define SDVIO_OCP_THD_MASK 0x70
#define SD_OCP_THD_450 0x00
#define SD_OCP_THD_550 0x01
#define SD_OCP_THD_650 0x02
#define SD_OCP_THD_750 0x03
#define SD_OCP_THD_850 0x04
#define SD_OCP_THD_950 0x05
#define SD_OCP_THD_1050 0x06
#define SD_OCP_THD_1150 0x07
#define SD_OCP_THD_MASK 0x07
#define SDVIO_OCP_GLITCH_MASK 0xF0
#define SDVIO_OCP_GLITCH_NONE 0x00
#define SDVIO_OCP_GLITCH_50U 0x10
#define SDVIO_OCP_GLITCH_100U 0x20
#define SDVIO_OCP_GLITCH_200U 0x30
#define SDVIO_OCP_GLITCH_600U 0x40
#define SDVIO_OCP_GLITCH_800U 0x50
#define SDVIO_OCP_GLITCH_1M 0x60
#define SDVIO_OCP_GLITCH_2M 0x70
#define SDVIO_OCP_GLITCH_3M 0x80
#define SDVIO_OCP_GLITCH_4M 0x90
#define SDVIO_OCP_GLIVCH_5M 0xA0
#define SDVIO_OCP_GLITCH_6M 0xB0
#define SDVIO_OCP_GLITCH_7M 0xC0
#define SDVIO_OCP_GLITCH_8M 0xD0
#define SDVIO_OCP_GLITCH_9M 0xE0
#define SDVIO_OCP_GLITCH_10M 0xF0
#define SD_OCP_GLITCH_MASK 0x0F
#define SD_OCP_GLITCH_NONE 0x00
#define SD_OCP_GLITCH_50U 0x01
#define SD_OCP_GLITCH_100U 0x02
#define SD_OCP_GLITCH_200U 0x03
#define SD_OCP_GLITCH_600U 0x04
#define SD_OCP_GLITCH_800U 0x05
#define SD_OCP_GLITCH_1M 0x06
#define SD_OCP_GLITCH_2M 0x07
#define SD_OCP_GLITCH_3M 0x08
#define SD_OCP_GLITCH_4M 0x09
#define SD_OCP_GLIVCH_5M 0x0A
#define SD_OCP_GLITCH_6M 0x0B
#define SD_OCP_GLITCH_7M 0x0C
#define SD_OCP_GLITCH_8M 0x0D
#define SD_OCP_GLITCH_9M 0x0E
#define SD_OCP_GLITCH_10M 0x0F
/* Phy register */
#define PHY_PCR 0x00
#define PHY_PCR_FORCE_CODE 0xB000
@@ -857,6 +1055,7 @@
#define PCR_ASPM_SETTING_REG1 0x160
#define PCR_ASPM_SETTING_REG2 0x168
#define PCR_ASPM_SETTING_5260 0x178
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
@@ -890,6 +1089,7 @@ struct pcr_ops {
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
@@ -897,6 +1097,12 @@ struct pcr_ops {
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
void (*full_on)(struct rtsx_pcr *pcr);
void (*power_saving)(struct rtsx_pcr *pcr);
void (*enable_ocp)(struct rtsx_pcr *pcr);
void (*disable_ocp)(struct rtsx_pcr *pcr);
void (*init_ocp)(struct rtsx_pcr *pcr);
void (*process_ocp)(struct rtsx_pcr *pcr);
int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
void (*clear_ocpstat)(struct rtsx_pcr *pcr);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -915,10 +1121,10 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
enum dev_aspm_mode {
DEV_ASPM_DISABLE = 0,
DEV_ASPM_DYNAMIC,
DEV_ASPM_BACKDOOR,
DEV_ASPM_STATIC,
DEV_ASPM_DISABLE,
};
/*
@@ -935,6 +1141,9 @@ enum dev_aspm_mode {
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
* @ocp_en: enable OCP flag
* @sd_400mA_ocp_thd: 400mA OCP threshold
* @sd_800mA_ocp_thd: 800mA OCP threshold
*/
struct rtsx_cr_option {
u32 dev_flags;
@@ -949,6 +1158,19 @@ struct rtsx_cr_option {
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
bool ocp_en;
u8 sd_400mA_ocp_thd;
u8 sd_800mA_ocp_thd;
};
/*
* struct rtsx_hw_param - card reader hardware param
* @interrupt_en: indicates which interrupts are enabled
* @ocp_glitch: OCP glitch time
*/
struct rtsx_hw_param {
u32 interrupt_en;
u8 ocp_glitch;
};
#define rtsx_set_dev_flag(cr, flag) \
@@ -963,6 +1185,7 @@ struct rtsx_pcr {
unsigned int id;
int pcie_cap;
struct rtsx_cr_option option;
struct rtsx_hw_param hw_param;
/* pci resources */
unsigned long addr;
@@ -1042,12 +1265,15 @@ struct rtsx_pcr {
struct rtsx_slot *slots;
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
};
#define PID_524A 0x524A
#define PID_5249 0x5249
#define PID_5250 0x5250
#define PID_5249 0x5249
#define PID_5250 0x5250
#define PID_525A 0x525A
#define PID_5260 0x5260
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)

View File

@@ -10,9 +10,6 @@
*/
typedef struct {
arch_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;

View File

@@ -276,6 +276,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask);
#ifdef CONFIG_SGL_ALLOC
struct scatterlist *sgl_alloc_order(unsigned long long length,
unsigned int order, bool chainable,
gfp_t gfp, unsigned int *nent_p);
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
unsigned int *nent_p);
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
void sgl_free_order(struct scatterlist *sgl, int order);
void sgl_free(struct scatterlist *sgl);
#endif /* CONFIG_SGL_ALLOC */
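/*
 * Usage sketch (not part of this patch): allocate a scatterlist backed by
 * freshly allocated pages, use it, then release both the pages and the
 * scatterlist. The function name is hypothetical.
 */
static int example_use_sgl(unsigned long long length)
{
	unsigned int nents;
	struct scatterlist *sgl;

	sgl = sgl_alloc(length, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... map the @nents entries for DMA and perform the transfer ... */

	sgl_free(sgl);
	return 0;
}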
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);

View File

@@ -472,11 +472,15 @@ struct sched_dl_entity {
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
*/
unsigned int dl_throttled : 1;
unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -849,17 +853,6 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
struct hist_lock *xhlocks; /* Crossrelease history locks */
unsigned int xhlock_idx;
/* For restoring at history boundaries */
unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
unsigned int hist_id;
/* For overwrite check at each context exit */
unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif
#ifdef CONFIG_UBSAN
unsigned int in_ubsan;
#endif
@@ -1438,6 +1431,7 @@ extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
@@ -1457,12 +1451,21 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
@@ -1503,7 +1506,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({ \
BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
__get_task_comm(buf, sizeof(buf), tsk); \
})
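/*
 * Usage sketch (not part of this patch): the buffer passed to
 * get_task_comm() must be a real array of TASK_COMM_LEN bytes, otherwise
 * the BUILD_BUG_ON() above fires at compile time.
 */
static void example_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("running on behalf of %s\n", comm);
}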
#ifdef CONFIG_SMP
void scheduler_ipi(void);

View File

@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\

View File

@@ -12,8 +12,6 @@
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);

View File

@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {}
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
static inline int object_is_on_stack(void *obj)
static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);

View File

@@ -6,6 +6,12 @@
#include <linux/sched/idle.h>
/*
* Increase resolution of cpu_capacity calculations
*/
#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
* sched-domains (multiprocessor balancing) declarations:
*/
@@ -27,12 +33,6 @@
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
/*
* Increase resolution of cpu_capacity calculations
*/
#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{

View File

@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
int seq = READ_ONCE(s->sequence);
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
smp_read_barrier_depends();
int seq = READ_ONCE(s->sequence); /* ^^^ */
return seq;
}

Some files were not shown because too many files have changed in this diff.