Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.

include/linux/acpi.h

@@ -506,7 +506,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_CONTROL_MASKS 0x0000001f
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
#define OSC_PCI_CONTROL_MASKS 0x0000003f

#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004

@@ -578,6 +579,7 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);

extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
extern void arch_post_acpi_subsys_init(void);

extern int acpi_nvs_register(__u64 start, __u64 size);

@@ -899,7 +901,7 @@ static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
        return -ENODEV;
        return 0;
}
#endif

@@ -1297,4 +1299,23 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif

#ifdef CONFIG_ACPI_PPTT
int find_acpi_cpu_topology(unsigned int cpu, int level);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
#else
static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
{
        return -EINVAL;
}
static inline int find_acpi_cpu_topology_package(unsigned int cpu)
{
        return -EINVAL;
}
static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
{
        return -EINVAL;
}
#endif

#endif /*_LINUX_ACPI_H*/

include/linux/aer.h

@@ -14,6 +14,7 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
#define DPC_FATAL 3

struct pci_dev;

include/linux/aio.h

@@ -8,8 +8,6 @@ struct kioctx;
struct kiocb;
struct mm_struct;

#define KIOCB_KEY 0

typedef int (kiocb_cancel_fn)(struct kiocb *);

/* prototypes */

include/linux/arm-smccc.h

@@ -80,6 +80,11 @@
        ARM_SMCCC_SMC_32, \
        0, 0x8000)

#define ARM_SMCCC_ARCH_WORKAROUND_2 \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
        ARM_SMCCC_SMC_32, \
        0, 0x7fff)

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

@@ -291,5 +296,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 */
#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)

/* Return codes defined in ARM DEN 0070A */
#define SMCCC_RET_SUCCESS 0
#define SMCCC_RET_NOT_SUPPORTED -1
#define SMCCC_RET_NOT_REQUIRED -2

#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
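
The new ARM_SMCCC_ARCH_WORKAROUND_2 function ID and the SMCCC_RET_* codes let a
guest probe firmware for the SSBD mitigation. A minimal sketch of the probe
pattern, assuming ARM_SMCCC_ARCH_FEATURES_FUNC_ID from the same header and an
HVC conduit (return-value mapping here is illustrative, not from this commit):

        /* Query firmware: is WORKAROUND_2 implemented for this CPU? */
        static int ssbd_firmware_state(void)
        {
                struct arm_smccc_res res;

                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                switch ((int)res.a0) {
                case SMCCC_RET_SUCCESS:      return 1;  /* dynamic mitigation */
                case SMCCC_RET_NOT_REQUIRED: return 0;  /* CPU not affected */
                default:                     return -1; /* not supported */
                }
        }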

include/linux/assoc_array.h

@@ -1,6 +1,6 @@
/* Generic associative array implementation.
 *
 * See Documentation/assoc_array.txt for information.
 * See Documentation/core-api/assoc_array.rst for information.
 *
 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)

include/linux/assoc_array_priv.h

@@ -1,6 +1,6 @@
/* Private definitions for the generic associative array implementation.
 *
 * See Documentation/assoc_array.txt for information.
 * See Documentation/core-api/assoc_array.rst for information.
 *
 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)

include/linux/atalk.h

@@ -145,7 +145,12 @@ extern rwlock_t atalk_interfaces_lock;

extern struct atalk_route atrtr_default;

extern const struct file_operations atalk_seq_arp_fops;
struct aarp_iter_state {
        int bucket;
        struct aarp_entry **table;
};

extern const struct seq_operations aarp_seq_ops;

extern int sysctl_aarp_expiry_time;
extern int sysctl_aarp_tick_time;

include/linux/atomic.h

@@ -654,6 +654,7 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif

#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))

#ifdef CONFIG_GENERIC_ATOMIC64

@@ -1075,6 +1076,7 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
}
#endif

#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))

#include <asm-generic/atomic-long.h>

include/linux/audit.h

@@ -232,12 +232,24 @@ extern void __audit_file(const struct file *);
extern void __audit_inode_child(struct inode *parent,
        const struct dentry *dentry,
        const unsigned char type);
extern void __audit_seccomp(unsigned long syscall, long signr, int code);
extern void audit_seccomp(unsigned long syscall, long signr, int code);
extern void audit_seccomp_actions_logged(const char *names,
        const char *old_names, int res);
extern void __audit_ptrace(struct task_struct *t);

static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
{
        task->audit_context = ctx;
}

static inline struct audit_context *audit_context(void)
{
        return current->audit_context;
}

static inline bool audit_dummy_context(void)
{
        void *p = current->audit_context;
        void *p = audit_context();
        return !p || *(int *)p;
}
static inline void audit_free(struct task_struct *task)

@@ -249,12 +261,12 @@ static inline void audit_syscall_entry(int major, unsigned long a0,
        unsigned long a1, unsigned long a2,
        unsigned long a3)
{
        if (unlikely(current->audit_context))
        if (unlikely(audit_context()))
                __audit_syscall_entry(major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
{
        if (unlikely(current->audit_context)) {
        if (unlikely(audit_context())) {
                int success = is_syscall_success(pt_regs);
                long return_code = regs_return_value(pt_regs);

@@ -302,12 +314,6 @@ static inline void audit_inode_child(struct inode *parent,
}
void audit_core_dumps(long signr);

static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{
        if (audit_enabled && unlikely(!audit_dummy_context()))
                __audit_seccomp(syscall, signr, code);
}

static inline void audit_ptrace(struct task_struct *t)
{
        if (unlikely(!audit_dummy_context()))

@@ -468,6 +474,12 @@ static inline bool audit_dummy_context(void)
{
        return true;
}
static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
{ }
static inline struct audit_context *audit_context(void)
{
        return NULL;
}
static inline struct filename *audit_reusename(const __user char *name)
{
        return NULL;

@@ -498,10 +510,11 @@ static inline void audit_inode_child(struct inode *parent,
{ }
static inline void audit_core_dumps(long signr)
{ }
static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
{ }
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
static inline void audit_seccomp_actions_logged(const char *names,
        const char *old_names, int res)
{ }
static inline int auditsc_get_stamp(struct audit_context *ctx,
        struct timespec64 *t, unsigned int *serial)
{

@@ -513,7 +526,7 @@ static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
}
static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
{
        return -1;
        return AUDIT_SID_UNSET;
}
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
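
The recurring change in these hunks replaces direct current->audit_context
dereferences with the new audit_context()/audit_set_context() accessors. A
standalone sketch of the same refactor pattern (the struct and current_task
names below are illustrative stand-ins for task_struct and "current", not
definitions from this commit):

        #include <stddef.h>

        struct audit_ctx  { int dummy; };
        struct task       { struct audit_ctx *audit_context; };

        static struct task *current_task;      /* stands in for "current" */

        /* Accessor hides where the context lives on the task */
        static inline struct audit_ctx *audit_context(void)
        {
                return current_task ? current_task->audit_context : NULL;
        }

        /* Call sites test the accessor instead of poking the field */
        static inline int audit_dummy_context(void)
        {
                void *p = audit_context();
                return !p;
        }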

include/linux/avf/virtchnl.h

@@ -485,7 +485,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
        u16 vsi_id;
        u16 lut_entries;
        u8 lut[1]; /* RSS lookup table*/
        u8 lut[1]; /* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);

@@ -819,7 +819,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
        return VIRTCHNL_ERR_PARAM;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format))
if (err_msg_format || valid_len != msglen)
        return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

return 0;

include/linux/backlight.h

@@ -84,10 +84,6 @@ struct backlight_properties {

#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */
#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */
#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */
#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */

};

include/linux/binfmts.h

@@ -150,5 +150,6 @@ extern int do_execveat(int, struct filename *,
        const char __user * const __user *,
        const char __user * const __user *,
        int);
int do_execve_file(struct file *file, void *__argv, void *__envp);

#endif /* _LINUX_BINFMTS_H */

include/linux/bio.h

@@ -67,8 +67,12 @@

#define bio_multiple_segments(bio) \
        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))

#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.

@@ -123,6 +127,11 @@ static inline void *bio_data(struct bio *bio)
        return NULL;
}

static inline bool bio_full(struct bio *bio)
{
        return bio->bi_vcnt >= bio->bi_max_vecs;
}

/*
 * will die
 */

@@ -406,13 +415,14 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
        return bio_split(bio, sectors, gfp, bs);
}

extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
};
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

@@ -421,11 +431,11 @@ extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;
extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
        return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)

@@ -470,6 +480,10 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
        unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
        unsigned int len, unsigned int off);
void __bio_add_page(struct bio *bio, struct page *page,
        unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,

@@ -499,7 +513,10 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
}
#endif

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
        struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,

@@ -507,7 +524,13 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
        struct iov_iter *,
        gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
        zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

@@ -722,11 +745,11 @@ struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t *bio_pool;
        mempool_t *bvec_pool;
        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t *bio_integrity_pool;
        mempool_t *bvec_integrity_pool;
        mempool_t bio_integrity_pool;
        mempool_t bvec_integrity_pool;
#endif

        /*

@@ -745,6 +768,11 @@ struct biovec_slab {
        struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
        return bs->bio_slab != NULL;
}

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
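
These hunks move the block layer from heap-allocated bio_sets
(bioset_create()/bioset_free()) to caller-embedded ones
(bioset_init()/bioset_exit()), and fs_bio_set correspondingly becomes an
object rather than a pointer. A hedged sketch of how a driver-side user
migrates (the driver struct and its sizing are illustrative; only the
bioset_* calls come from this diff):

        /* Before: a separately allocated pool, bs == NULL on failure */
        /*   d->bs = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); */

        /* After: the pool is embedded and initialized in place */
        struct my_driver {
                struct bio_set bs;
        };

        static int my_driver_setup(struct my_driver *d)
        {
                return bioset_init(&d->bs, BIO_POOL_SIZE, 0,
                                   BIOSET_NEED_BVECS);
        }

        static void my_driver_teardown(struct my_driver *d)
        {
                bioset_exit(&d->bs);    /* replaces bioset_free(d->bs) */
        }

This removes one pointer chase and one allocation failure path per pool,
which is why bio_alloc() now takes &fs_bio_set.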

include/linux/blk-mq.h

@@ -259,7 +259,8 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);

bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
        struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);

@@ -280,8 +281,6 @@ void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
        unsigned long timeout);
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
        int (reinit_request)(void *, struct request *));

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

include/linux/blk_types.h

@@ -8,6 +8,7 @@

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;

@@ -90,10 +91,52 @@ static inline bool blk_path_error(blk_status_t error)
        return true;
}

struct blk_issue_stat {
        u64 stat;
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS 1
#define BIO_ISSUE_SIZE_BITS 12
#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
        u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
        return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
        return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
        return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
        sector_t size)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                        (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)

@@ -138,7 +181,7 @@ struct bio {
        struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        void *bi_cg_private;
        struct blk_issue_stat bi_issue_stat;
        struct bio_issue bi_issue;
#endif
#endif
        union {

@@ -186,6 +229,8 @@ struct bio {
        * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
        * of this bio. */
#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
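
The new struct bio_issue packs three fields into one u64: 1 reserved bit,
12 bits of original bio size (in sectors), and 51 bits of issue time. A
standalone sketch of the same packing arithmetic, with the constants copied
from the hunk above and now_ns standing in for ktime_get_ns():

        #include <stdint.h>
        #include <stdio.h>

        #define BIO_ISSUE_RES_BITS   1
        #define BIO_ISSUE_SIZE_BITS  12
        #define BIO_ISSUE_RES_SHIFT  (64 - BIO_ISSUE_RES_BITS)
        #define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
        #define BIO_ISSUE_TIME_MASK  ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
        #define BIO_ISSUE_SIZE_MASK \
                (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
        #define BIO_ISSUE_RES_MASK   (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

        int main(void)
        {
                uint64_t value = 0, now_ns = 123456789ULL, size = 2048;

                /* bio_issue_init(): keep the reserved bit, store time+size */
                size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
                value = (value & BIO_ISSUE_RES_MASK) |
                        (now_ns & BIO_ISSUE_TIME_MASK) |
                        (size << BIO_ISSUE_SIZE_SHIFT);

                /* bio_issue_time()/bio_issue_size() unpack the fields */
                printf("time=%llu size=%llu\n",
                       (unsigned long long)(value & BIO_ISSUE_TIME_MASK),
                       (unsigned long long)((value & BIO_ISSUE_SIZE_MASK) >>
                                            BIO_ISSUE_SIZE_SHIFT));
                return 0;
        }

Sizes above 4095 sectors saturate the 12-bit field, and times wrap at 2^51 ns;
both are acceptable for the latency heuristics this feeds.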

include/linux/blkdev.h

@@ -125,15 +125,24 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
/* timeout is expired */
#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
        (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
        MQ_RQ_IDLE = 0,
        MQ_RQ_IN_FLIGHT = 1,
        MQ_RQ_COMPLETE = 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *

@@ -205,9 +214,20 @@ struct request {

        struct gendisk *rq_disk;
        struct hd_struct *part;
        unsigned long start_time;
        struct blk_issue_stat issue_stat;
        /* Number of scatter-gather DMA addr+len pairs after
        /* Time that I/O was submitted to the kernel. */
        u64 start_time_ns;
        /* Time that I/O was submitted to the device. */
        u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
        unsigned short wbt_flags;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        unsigned short throtl_size;
#endif

        /*
         * Number of scatter-gather DMA addr+len pairs after
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;

@@ -219,32 +239,14 @@ struct request {
        unsigned short write_hint;
        unsigned short ioprio;

        unsigned int timeout;

        void *special; /* opaque pointer available for LLD use */

        unsigned int extra_len; /* length of alignment and padding */

        /*
         * On blk-mq, the lower bits of ->gstate (generation number and
         * state) carry the MQ_RQ_* state value and the upper bits the
         * generation number which is monotonically incremented and used to
         * distinguish the reuse instances.
         *
         * ->gstate_seq allows updates to ->gstate and other fields
         * (currently ->deadline) during request start to be read
         * atomically from the timeout path, so that it can operate on a
         * coherent set of information.
         */
        seqcount_t gstate_seq;
        u64 gstate;
        enum mq_rq_state state;
        refcount_t ref;

        /*
         * ->aborted_gstate is used by the timeout to claim a specific
         * recycle instance of this request. See blk_mq_timeout_work().
         */
        struct u64_stats_sync aborted_gstate_sync;
        u64 aborted_gstate;
        unsigned int timeout;

        /* access through blk_rq_set_deadline, blk_rq_deadline */
        unsigned long __deadline;

@@ -267,8 +269,6 @@ struct request {

#ifdef CONFIG_BLK_CGROUP
        struct request_list *rl; /* rl this rq is alloced from */
        unsigned long long start_time_ns;
        unsigned long long io_start_time_ns; /* when passed to hardware */
#endif
};

@@ -328,9 +328,8 @@ typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
typedef void (exit_rq_fn)(struct request_queue *, struct request *);

enum blk_eh_timer_return {
        BLK_EH_NOT_HANDLED,
        BLK_EH_HANDLED,
        BLK_EH_RESET_TIMER,
        BLK_EH_DONE, /* drivers has completed the command */
        BLK_EH_RESET_TIMER, /* reset timer and try again */
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

@@ -563,7 +562,6 @@ struct request_queue {
        unsigned int dma_alignment;

        struct blk_queue_tag *queue_tags;
        struct list_head tag_busy_list;

        unsigned int nr_sorted;
        unsigned int in_flight[2];

@@ -655,7 +653,7 @@ struct request_queue {

        struct blk_mq_tag_set *tag_set;
        struct list_head tag_set_list;
        struct bio_set *bio_split;
        struct bio_set bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
        struct dentry *debugfs_dir;

@@ -967,11 +965,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request_flags(struct request_queue *,
        unsigned int op,
        blk_mq_req_flags_t flags);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
        gfp_t gfp_mask);
        blk_mq_req_flags_t flags);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,

@@ -1379,7 +1374,6 @@ extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

@@ -1788,48 +1782,6 @@ int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
        preempt_disable();
        req->start_time_ns = sched_clock();
        preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
        preempt_disable();
        req->io_start_time_ns = sched_clock();
        preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
        return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
        return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
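
With the CONFIG_BLK_CGROUP-only sched_clock() helpers gone, request timing
now lives unconditionally in start_time_ns/io_start_time_ns, stamped with
ktime_get_ns(). A sketch of how device-side latency falls out of the new
fields (helper name and the now_ns parameter are illustrative, not from this
diff):

        /* Device-side latency of a request, in nanoseconds. Assumes both
         * timestamps come from ktime_get_ns(), as in the hunks above. */
        static inline u64 rq_device_latency_ns(const struct request *rq,
                                               u64 now_ns)
        {
                if (!rq->io_start_time_ns)   /* never reached the device */
                        return 0;
                return now_ns - rq->io_start_time_ns;
        }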

include/linux/bpf-cgroup.h

@@ -66,7 +66,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
        struct sockaddr *uaddr,
        enum bpf_attach_type type);
        enum bpf_attach_type type,
        void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
        struct bpf_sock_ops_kern *sock_ops,

@@ -120,16 +121,18 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          NULL); \
        __ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type) \
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          t_ctx); \
                release_sock(sk); \
        } \
        __ret; \

@@ -151,10 +154,16 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \

@@ -185,6 +194,7 @@ struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })

@@ -197,6 +207,8 @@ static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
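
The new t_ctx argument threads per-call context into the sock_addr hooks at
sendmsg time; the existing connect hooks keep passing NULL. A hedged sketch
of the kind of caller this enables (the wrapper and its context parameter are
invented for illustration; the real net/ipv4 call site passes
protocol-specific state):

        /* Run the UDPv4 sendmsg hook with opaque per-packet context */
        static int run_udp4_sendmsg_hook(struct sock *sk,
                                         struct sockaddr *uaddr,
                                         void *pkt_ctx /* t_ctx */)
        {
                return BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr,
                                                             pkt_ctx);
        }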

include/linux/bpf.h

@@ -22,6 +22,8 @@ struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;

/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {

@@ -44,10 +46,14 @@ struct bpf_map_ops {
        void (*map_fd_put_ptr)(void *ptr);
        u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
        u32 (*map_fd_sys_lookup_elem)(void *ptr);
        void (*map_seq_show_elem)(struct bpf_map *map, void *key,
                                  struct seq_file *m);
        int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
                             u32 key_type_id, u32 value_type_id);
};

struct bpf_map {
        /* 1st cacheline with read-mostly members of which some
        /* The first two cachelines with read-mostly members of which some
         * are also accessed in fast-path (e.g. ops, max_entries).
         */
        const struct bpf_map_ops *ops ____cacheline_aligned;

@@ -63,10 +69,13 @@ struct bpf_map {
        u32 pages;
        u32 id;
        int numa_node;
        u32 btf_key_type_id;
        u32 btf_value_type_id;
        struct btf *btf;
        bool unpriv_array;
        /* 7 bytes hole */
        /* 55 bytes hole */

        /* 2nd cacheline with misc members to avoid false sharing
        /* The 3rd and 4th cacheline with misc members to avoid false sharing
         * particularly with refcounting.
         */
        struct user_struct *user ____cacheline_aligned;

@@ -101,6 +110,16 @@ static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
        return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
        return map->ops->map_seq_show_elem && map->ops->map_check_btf;
}

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */

@@ -221,6 +240,8 @@ struct bpf_verifier_ops {
        struct bpf_insn_access_aux *info);
        int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
                            const struct bpf_prog *prog);
        int (*gen_ld_abs)(const struct bpf_insn *orig,
                          struct bpf_insn *insn_buf);
        u32 (*convert_ctx_access)(enum bpf_access_type type,
                                  const struct bpf_insn *src,
                                  struct bpf_insn *dst,

@@ -442,6 +463,8 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
                             size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.

@@ -464,14 +487,17 @@ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct xdp_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
struct xdp_buff;
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);

@@ -550,6 +576,16 @@ static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{

@@ -564,7 +600,6 @@ static inline void __cpu_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
                                  struct xdp_buff *xdp,
                                  struct net_device *dev_rx)

@@ -606,7 +641,7 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
        return aux->offload_requested;
}

@@ -647,6 +682,7 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)

#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)

@@ -654,6 +690,12 @@ static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
        return NULL;
}

static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map,
                                                   void *key)
{
        return NULL;
}

static inline int sock_map_prog(struct bpf_map *map,
                                struct bpf_prog *prog,
                                u32 type)

@@ -662,6 +704,31 @@ static inline int sock_map_prog(struct bpf_map *map,
}
#endif

#if defined(CONFIG_XDP_SOCKETS)
struct xdp_sock;
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
                       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
#else
struct xdp_sock;
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
                                                     u32 key)
{
        return NULL;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
                                     struct xdp_sock *xs)
{
        return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}
#endif

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;

@@ -675,10 +742,11 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);

include/linux/bpf_lirc.h (new file, 29 lines)

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_LIRC_H
#define _BPF_LIRC_H

#include <uapi/linux/bpf.h>

#ifdef CONFIG_BPF_LIRC_MODE2
int lirc_prog_attach(const union bpf_attr *attr);
int lirc_prog_detach(const union bpf_attr *attr);
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
#else
static inline int lirc_prog_attach(const union bpf_attr *attr)
{
        return -EINVAL;
}

static inline int lirc_prog_detach(const union bpf_attr *attr)
{
        return -EINVAL;
}

static inline int lirc_prog_query(const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
{
        return -EINVAL;
}
#endif

#endif /* _BPF_LIRC_H */

include/linux/bpf_trace.h

@@ -2,7 +2,6 @@
#ifndef __LINUX_BPF_TRACE_H__
#define __LINUX_BPF_TRACE_H__

#include <trace/events/bpf.h>
#include <trace/events/xdp.h>

#endif /* __LINUX_BPF_TRACE_H__ */

include/linux/bpf_types.h

@@ -9,9 +9,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)

@@ -25,6 +26,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#endif
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
#endif

BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)

@@ -47,6 +51,10 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
#endif

include/linux/bpf_verifier.h

@@ -174,6 +174,11 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
        u32 start; /* insn idx of function entry point */
        u16 stack_depth; /* max. stack depth used by this function */
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */

@@ -192,14 +197,12 @@ struct bpf_verifier_env {
        bool seen_direct_write;
        struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
        struct bpf_verifier_log log;
        u32 subprog_starts[BPF_MAX_SUBPROGS];
        /* computes the stack depth of each bpf function */
        u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
        struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
        u32 subprog_cnt;
};

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
        va_list args);
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
        const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
        const char *fmt, ...);

include/linux/bpfilter.h (new file, 15 lines)

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BPFILTER_H
#define _LINUX_BPFILTER_H

#include <uapi/linux/bpfilter.h>

struct sock;
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
                            unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
                            int *optlen);
extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
                                       char __user *optval,
                                       unsigned int optlen, bool is_set);
#endif

include/linux/brcmphy.h

@@ -85,6 +85,7 @@
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */

#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */

@@ -219,6 +220,9 @@
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)

/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)

/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */

include/linux/bsg-lib.h

@@ -72,8 +72,7 @@ struct bsg_job {

void bsg_job_done(struct bsg_job *job, int result,
        unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
        bsg_job_fn *job_fn, int dd_job_size,
        void (*release)(struct device *));
        bsg_job_fn *job_fn, int dd_job_size);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);

include/linux/bsg.h

@@ -17,17 +17,13 @@ struct bsg_ops {

struct bsg_class_device {
        struct device *class_dev;
        struct device *parent;
        int minor;
        struct request_queue *queue;
        struct kref ref;
        const struct bsg_ops *ops;
        void (*release)(struct device *);
};

int bsg_register_queue(struct request_queue *q, struct device *parent,
        const char *name, const struct bsg_ops *ops,
        void (*release)(struct device *));
        const char *name, const struct bsg_ops *ops);
int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
void bsg_unregister_queue(struct request_queue *q);
#else

include/linux/btf.h (new file, 50 lines)

@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */

#ifndef _LINUX_BTF_H
#define _LINUX_BTF_H 1

#include <linux/types.h>

struct btf;
struct btf_type;
union bpf_attr;

extern const struct file_operations btf_fops;

void btf_put(struct btf *btf);
int btf_new_fd(const union bpf_attr *attr);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
                       const union bpf_attr *attr,
                       union bpf_attr __user *uattr);
/* Figure out the size of a type_id. If type_id is a modifier
 * (e.g. const), it will be resolved to find out the type with size.
 *
 * For example:
 * In describing "const void *", type_id is "const" and "const"
 * refers to "void *". The return type will be "void *".
 *
 * If type_id is a simple "int", then return type will be "int".
 *
 * @btf: struct btf object
 * @type_id: Find out the size of type_id. The type_id of the return
 * type is set to *type_id.
 * @ret_size: It can be NULL. If not NULL, the size of the return
 * type is set to *ret_size.
 * Return: The btf_type (resolved to another type with size info if needed).
 * NULL is returned if type_id itself does not have size info
 * (e.g. void) or it cannot be resolved to another type that
 * has size info.
 * *type_id and *ret_size will not be changed in the
 * NULL return case.
 */
const struct btf_type *btf_type_id_size(const struct btf *btf,
                                        u32 *type_id,
                                        u32 *ret_size);
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
                       struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);

#endif
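
Per the kernel-doc above, btf_type_id_size() resolves modifiers until it
reaches a sized type, updating its in/out parameters as it goes. A hedged
usage sketch (the caller function is invented; error handling is elided):

        /* Resolve a possibly-modified type to its sized form; "btf" would
         * come from btf_get_by_fd(). Returns byte size or -EINVAL. */
        static int probe_value_size(const struct btf *btf, u32 type_id)
        {
                u32 resolved = type_id, size;
                const struct btf_type *t;

                t = btf_type_id_size(btf, &resolved, &size);
                if (!t)
                        return -EINVAL;  /* e.g. void: no size info */

                /* "resolved" now names the sized type, "size" its bytes */
                return size;
        }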

include/linux/buffer_head.h

@@ -205,8 +205,6 @@ void write_boundary_block(struct block_device *bdev,
        sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
        loff_t length, int whence);

extern int buffer_heads_over_limit;

include/linux/cacheinfo.h

@@ -34,9 +34,8 @@ enum cache_type {
 * @shared_cpu_map: logical cpumask representing all the cpus sharing
 * this cache node
 * @attributes: bitfield representing various cache attributes
 * @of_node: if devicetree is used, this represents either the cpu node in
 * case there's no explicit cache node or the cache node itself in the
 * device tree
 * @fw_token: Unique value used to determine if different cacheinfo
 * structures represent a single hardware cache instance.
 * @disable_sysfs: indicates whether this node is visible to the user via
 * sysfs or not
 * @priv: pointer to any private data structure specific to particular

@@ -65,8 +64,7 @@ struct cacheinfo {
#define CACHE_ALLOCATE_POLICY_MASK \
        (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
#define CACHE_ID BIT(4)

        struct device_node *of_node;
        void *fw_token;
        bool disable_sysfs;
        void *priv;
};

@@ -99,6 +97,23 @@ int func(unsigned int cpu) \
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
#ifndef CONFIG_ACPI_PPTT
/*
 * acpi_find_last_cache_level is only called on ACPI enabled
 * platforms using the PPTT for topology. This means that if
 * the platform supports other firmware configuration methods
 * we need to stub out the call when ACPI is disabled.
 * ACPI enabled platforms not using PPTT won't be making calls
 * to this function so we need not worry about them.
 */
static inline int acpi_find_last_cache_level(unsigned int cpu)
{
        return 0;
}
#else
int acpi_find_last_cache_level(unsigned int cpu);
#endif

const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);

include/linux/ceph/ceph_fs.h

@@ -628,6 +628,7 @@ int ceph_flags_to_mode(int flags);
        CEPH_CAP_XATTR_SHARED)
#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
        CEPH_CAP_FILE_RD)
#define CEPH_STAT_RSTAT CEPH_CAP_FILE_WREXTEND

#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
        CEPH_CAP_LINK_SHARED | \

include/linux/ceph/osd_client.h

@@ -170,6 +170,7 @@ struct ceph_osd_request {
        u64 r_tid; /* unique for this client */
        struct rb_node r_node;
        struct rb_node r_mc_node; /* map check */
        struct work_struct r_complete_work;
        struct ceph_osd *r_osd;

        struct ceph_osd_request_target r_t;

@@ -201,7 +202,6 @@ struct ceph_osd_request {
        struct timespec r_mtime; /* ditto */
        u64 r_data_offset; /* ditto */
        bool r_linger; /* don't resend on failure */
        bool r_abort_on_full; /* return ENOSPC when full */

        /* internal */
        unsigned long r_stamp; /* jiffies, send or check time */

@@ -347,6 +347,8 @@ struct ceph_osd_client {
        struct rb_root linger_map_checks;
        atomic_t num_requests;
        atomic_t num_homeless;
        bool abort_on_full; /* abort w/ ENOSPC when full */
        int abort_err;
        struct delayed_work timeout_work;
        struct delayed_work osds_timeout_work;
#ifdef CONFIG_DEBUG_FS

@@ -359,6 +361,7 @@ struct ceph_osd_client {
        struct ceph_msgpool msgpool_op_reply;

        struct workqueue_struct *notify_wq;
        struct workqueue_struct *completion_wq;
};

static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)

@@ -378,6 +381,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
        struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);

extern void osd_req_op_init(struct ceph_osd_request *osd_req,
        unsigned int which, u16 opcode, u32 flags);

@@ -440,7 +444,7 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
        struct page **pages, u64 length,
        u32 alignment, bool pages_from_pool,
        bool own_pages);
extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req,
        unsigned int which, u16 opcode,
        const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,

include/linux/ceph/osdmap.h

@@ -279,10 +279,10 @@ bool ceph_osds_changed(const struct ceph_osds *old_acting,
        const struct ceph_osds *new_acting,
        bool any_change);

int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
        const struct ceph_object_id *oid,
        const struct ceph_object_locator *oloc,
        struct ceph_pg *raw_pgid);
void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
        const struct ceph_object_id *oid,
        const struct ceph_object_locator *oloc,
        struct ceph_pg *raw_pgid);
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
        const struct ceph_object_id *oid,
        const struct ceph_object_locator *oloc,

include/linux/cfag12864b.h

@@ -1,25 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Filename: cfag12864b.h
 * Version: 0.1.0
 * Description: cfag12864b LCD driver header
 * License: GPLv2
 *
 * Author: Copyright (C) Miguel Ojeda Sandonis
 * Date: 2006-10-12
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#ifndef _CFAG12864B_H_
@@ -105,6 +105,8 @@ enum {
|
||||
struct cgroup_file {
|
||||
/* do not access any fields from outside cgroup core */
|
||||
struct kernfs_node *kn;
|
||||
unsigned long notified_at;
|
||||
struct timer_list notify_timer;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -128,6 +130,9 @@ struct cgroup_subsys_state {
|
||||
struct list_head sibling;
|
||||
struct list_head children;
|
||||
|
||||
/* flush target list anchored at cgrp->rstat_css_list */
|
||||
struct list_head rstat_css_node;
|
||||
|
||||
/*
|
||||
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
|
||||
* matching css can be looked up using css_from_id().
|
||||
@@ -256,12 +261,16 @@ struct css_set {
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
struct cgroup_base_stat {
|
||||
struct task_cputime cputime;
|
||||
};
|
||||
|
||||
/*
|
||||
* cgroup basic resource usage statistics. Accounting is done per-cpu in
|
||||
* cgroup_cpu_stat which is then lazily propagated up the hierarchy on
|
||||
* reads.
|
||||
* rstat - cgroup scalable recursive statistics. Accounting is done
|
||||
* per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
|
||||
* hierarchy on reads.
|
||||
*
|
||||
* When a stat gets updated, the cgroup_cpu_stat and its ancestors are
|
||||
* When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
|
||||
* linked into the updated tree. On the following read, propagation only
|
||||
* considers and consumes the updated tree. This makes reading O(the
|
||||
* number of descendants which have been active since last read) instead of
|
||||
@@ -271,20 +280,24 @@ struct css_set {
|
||||
* aren't active and stat may be read frequently. The combination can
|
||||
* become very expensive. By propagating selectively, increasing reading
|
||||
* frequency decreases the cost of each read.
|
||||
*
|
||||
* This struct hosts both the fields which implement the above -
|
||||
* updated_children and updated_next - and the fields which track basic
|
||||
* resource statistics on top of it - bsync, bstat and last_bstat.
|
||||
*/
|
||||
struct cgroup_cpu_stat {
|
||||
struct cgroup_rstat_cpu {
|
||||
/*
|
||||
* ->sync protects all the current counters. These are the only
|
||||
* fields which get updated in the hot path.
|
||||
* ->bsync protects ->bstat. These are the only fields which get
|
||||
* updated in the hot path.
|
||||
*/
|
||||
struct u64_stats_sync sync;
|
||||
struct task_cputime cputime;
|
||||
struct u64_stats_sync bsync;
|
||||
struct cgroup_base_stat bstat;
|
||||
|
||||
/*
|
||||
* Snapshots at the last reading. These are used to calculate the
|
||||
* deltas to propagate to the global counters.
|
||||
*/
|
||||
struct task_cputime last_cputime;
|
||||
struct cgroup_base_stat last_bstat;
|
||||
|
||||
/*
|
||||
* Child cgroups with stat updates on this cpu since the last read
|
||||
@@ -295,18 +308,12 @@ struct cgroup_cpu_stat {
* to the cgroup makes it unnecessary for each per-cpu struct to
* point back to the associated cgroup.
*
* Protected by per-cpu cgroup_cpu_stat_lock.
* Protected by per-cpu cgroup_rstat_cpu_lock.
*/
struct cgroup *updated_children; /* terminated by self cgroup */
struct cgroup *updated_next; /* NULL iff not on the list */
};

struct cgroup_stat {
/* per-cpu statistics are collected into the following global counters */
struct task_cputime cputime;
struct prev_cputime prev_cputime;
};

struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
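The updated_children/updated_next pair above is the whole trick behind cheap rstat reads. A minimal userspace sketch of the idea (my own illustration, not the kernel code: one global list stands in for the kernel's per-cpu lists, and the node/flush names are invented):

#include <stdio.h>

/* A node links itself (and, as in the kernel, its ancestors) onto an
 * "updated" list the first time it changes; a read consumes exactly
 * that list, so flushing costs O(nodes active since the last read)
 * rather than O(all nodes). */
struct node {
	struct node *parent;
	struct node *updated_next;	/* NULL iff not on the list */
	long long pending;		/* delta accumulated since last flush */
};

static struct node *updated_head;

static void node_updated(struct node *n, long long delta)
{
	n->pending += delta;
	/* link n and any still-unlinked ancestors; a self-pointer
	 * terminates the list so NULL can mean "not linked" */
	for (; n && !n->updated_next; n = n->parent) {
		n->updated_next = updated_head ? updated_head : n;
		updated_head = n;
	}
}

static void flush(long long *total)
{
	struct node *n = updated_head;

	updated_head = NULL;
	while (n) {
		struct node *next = n->updated_next;

		*total += n->pending;	/* consume this node's delta */
		n->pending = 0;
		n->updated_next = NULL;	/* off the list again */
		n = (next == n) ? NULL : next;
	}
}

int main(void)
{
	struct node root = { 0 }, child = { .parent = &root };
	long long total = 0;

	node_updated(&child, 5);
	flush(&total);			/* visits child and root only */
	printf("%lld\n", total);	/* prints 5 */
	return 0;
}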
@@ -406,10 +413,14 @@ struct cgroup {
*/
struct cgroup *dom_cgrp;

/* per-cpu recursive resource statistics */
struct cgroup_rstat_cpu __percpu *rstat_cpu;
struct list_head rstat_css_list;

/* cgroup basic resource statistics */
struct cgroup_cpu_stat __percpu *cpu_stat;
struct cgroup_stat pending_stat; /* pending from children */
struct cgroup_stat stat;
struct cgroup_base_stat pending_bstat; /* pending from children */
struct cgroup_base_stat bstat;
struct prev_cputime prev_cputime; /* for printing out cputime */

/*
* list of pidlists, up to two for each namespace (one for procs, one
@@ -570,6 +581,7 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);

@@ -690,11 +690,19 @@ static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
* cgroup scalable recursive statistics.
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
* Basic resource stats.
*/
#ifdef CONFIG_CGROUPS

#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* See Documentation/circular-buffers.txt for more information.
* See Documentation/core-api/circular-buffers.rst for more information.
*/

#ifndef _LINUX_CIRC_BUF_H

@@ -13,6 +13,7 @@

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_clk.h>

#ifdef CONFIG_COMMON_CLK

@@ -218,7 +219,7 @@ struct clk_ops {
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};

/**
@@ -805,8 +806,6 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);

struct of_device_id;

typedef void (*of_clk_init_cb_t)(struct device_node *);

struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -893,13 +892,10 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec,
void *data);
unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
const char *of_clk_get_parent_name(struct device_node *np, int index);
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags);
void of_clk_init(const struct of_device_id *matches);

#else /* !CONFIG_OF */

@@ -946,26 +942,16 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
static inline unsigned int of_clk_get_parent_count(struct device_node *np)
{
return 0;
}
static inline int of_clk_parent_fill(struct device_node *np,
const char **parents, unsigned int size)
{
return 0;
}
static inline const char *of_clk_get_parent_name(struct device_node *np,
int index)
{
return NULL;
}
static inline int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
return 0;
}
static inline void of_clk_init(const struct of_device_id *matches) {}
#endif /* CONFIG_OF */

/*
@@ -999,10 +985,5 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)

#endif /* platform dependent I/O accessors */

#ifdef CONFIG_DEBUG_FS
struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
void *data, const struct file_operations *fops);
#endif

#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */

include/linux/clk/davinci.h (new file)
@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Clock drivers for TI DaVinci PLL and PSC controllers
*
* Copyright (C) 2018 David Lechner <david@lechnology.com>
*/

#ifndef __LINUX_CLK_DAVINCI_PLL_H___
#define __LINUX_CLK_DAVINCI_PLL_H___

#include <linux/device.h>
#include <linux/regmap.h>

/* function for registering clocks in early boot */

#ifdef CONFIG_ARCH_DAVINCI_DA830
int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
#endif
#ifdef CONFIG_ARCH_DAVINCI_DA850
int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM355
int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm355_psc_init(struct device *dev, void __iomem *base);
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM365
int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm365_psc_init(struct device *dev, void __iomem *base);
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM644x
int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm644x_psc_init(struct device *dev, void __iomem *base);
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM646x
int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm646x_psc_init(struct device *dev, void __iomem *base);
#endif

#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */

@@ -7,8 +7,7 @@
*/

#include <linux/types.h>

#ifdef CONFIG_COMPAT
#include <linux/compat_time.h>

#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -21,8 +20,11 @@
#include <linux/unistd.h>

#include <asm/compat.h>

#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
#endif

#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -83,6 +85,8 @@
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */

#ifdef CONFIG_COMPAT

#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
#endif
@@ -290,8 +294,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
extern int get_compat_itimerspec64(struct itimerspec64 *its,
const struct compat_itimerspec __user *uits);
extern int put_compat_itimerspec64(const struct itimerspec64 *its,
@@ -330,6 +332,7 @@ extern int put_compat_rusage(const struct rusage *,
struct compat_rusage __user *);

struct compat_siginfo;
struct __compat_aio_sigset;

struct compat_dirent {
u32 d_ino;
@@ -553,6 +556,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
compat_long_t nr,
struct io_event __user *events,
struct compat_timespec __user *timeout);
asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
struct compat_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);

/* fs/cookies.c */
asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
@@ -1016,7 +1025,9 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
#else /* !CONFIG_COMPAT */

#define is_compat_task() (0)
#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }
#endif

#endif /* CONFIG_COMPAT */

include/linux/compat_time.h (new file)
@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPAT_TIME_H
#define _LINUX_COMPAT_TIME_H

#include <linux/types.h>
#include <linux/time64.h>

typedef s32 compat_time_t;

struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};

struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};

extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);

#endif /* _LINUX_COMPAT_TIME_H */
@@ -32,3 +32,17 @@
#ifdef __noretpoline
#undef __noretpoline
#endif

/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
* __has_builtin allowing us to avoid awkward version
* checks. Unfortunately, we don't know which version of gcc clang
* pretends to be, so the macro may or may not be defined.
*/
#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
#if __has_builtin(__builtin_mul_overflow) && \
__has_builtin(__builtin_add_overflow) && \
__has_builtin(__builtin_sub_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
@@ -343,3 +343,7 @@
* code
*/
#define uninitialized_var(x) x = x

#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif

@@ -44,3 +44,7 @@
#define __builtin_bswap16 _bswap16
#endif

/*
* icc defines __GNUC__, but does not implement the builtin overflow checkers.
*/
#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW

@@ -21,7 +21,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({ \
int ______r; \
long ______r; \
static struct ftrace_likely_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \

@@ -1,13 +1,6 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/

#ifndef _LINUX_CORESIGHT_H
@@ -381,7 +381,7 @@ struct cper_sec_proc_generic {
/* IA32/X64 Processor Error Section */
struct cper_sec_proc_ia {
__u64 validation_bits;
__u8 lapic_id;
__u64 lapic_id;
__u8 cpuid[48];
};

@@ -551,5 +551,7 @@ const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);

#endif

@@ -571,7 +571,7 @@ struct governor_attr {
size_t count);
};

static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
{
/*
* Allow remote callbacks if:

@@ -258,6 +258,7 @@ struct cpuidle_governor {

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern int cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}

@@ -5,6 +5,7 @@
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
#include <uapi/linux/vmcore.h>

#include <asm/pgtable.h> /* for pgprot_t */

@@ -93,4 +94,21 @@ static inline bool is_kdump_kernel(void) { return 0; }
#endif /* CONFIG_CRASH_DUMP */

extern unsigned long saved_max_pfn;

/* Device Dump information to be filled by drivers */
struct vmcoredd_data {
char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
unsigned int size; /* Size of the dump */
/* Driver's registered callback to be invoked to collect dump */
int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf);
};

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
int vmcore_add_device_dump(struct vmcoredd_data *data);
#else
static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#endif /* LINUX_CRASHDUMP_H */
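A hypothetical driver-side sketch of the device-dump hook above; the mydrv_* names are made up, while vmcore_add_device_dump() and the struct layout come from the hunk:

static int mydrv_collect(struct vmcoredd_data *data, void *buf)
{
	/* Registered callback: copy up to data->size bytes of device
	 * state into buf; return 0 on success. */
	return 0;
}

static struct vmcoredd_data mydrv_dump = {
	.dump_name	= "mydrv_dump",		/* unique, hypothetical */
	.size		= 1024 * 1024,
	.vmcoredd_callback = mydrv_collect,
};

static int mydrv_setup_dump(void)
{
	/* Returns -EOPNOTSUPP when CONFIG_PROC_VMCORE_DEVICE_DUMP is off */
	return vmcore_add_device_dump(&mydrv_dump);
}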
@@ -20,6 +20,9 @@ struct dax_operations {
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
/* copy_to_iter: required operation for fs-dax direct-i/o */
size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
};

extern struct attribute_group dax_attribute_group;
@@ -64,10 +67,10 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
return __bdev_dax_supported(sb, blocksize);
return __bdev_dax_supported(bdev, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
@@ -83,10 +86,13 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
{
return -EOPNOTSUPP;
return false;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
@@ -103,6 +109,11 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc)
{
@@ -118,14 +129,16 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t pfn);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);

@@ -29,7 +29,7 @@

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
spinlock_t lock;
raw_spinlock_t lock;
unsigned int flags; /* Private per-task flags */

/* For each stat XXX, add following, aligned appropriately

@@ -133,7 +133,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)

@@ -184,7 +184,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
dm_dax_copy_from_iter_fn dax_copy_from_iter;
dm_dax_copy_iter_fn dax_copy_from_iter;
dm_dax_copy_iter_fn dax_copy_to_iter;

/* For internal device-mapper use. */
struct list_head list;

@@ -25,6 +25,7 @@
#include <linux/ratelimit.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <asm/device.h>

struct device;
@@ -88,6 +89,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @resume: Called to bring a device on this bus out of sleep mode.
* @num_vf: Called to find out how many virtual functions a device on this
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
* this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -96,8 +99,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @p: The private data of the driver core, only the driver core can
* touch this.
* @lock_key: Lock class key for use by the lock validator
* @force_dma: Assume devices on this bus should be set up by dma_configure()
* even if DMA capability is not explicitly described by firmware.
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
* A bus is a channel between the processor and one or more devices. For the
* purposes of the device model, all devices are connected via a bus, even if
@@ -130,6 +133,8 @@ struct bus_type {

int (*num_vf)(struct device *dev);

int (*dma_configure)(struct device *dev);

const struct dev_pm_ops *pm;

const struct iommu_ops *iommu_ops;
@@ -137,7 +142,7 @@ struct bus_type {
struct subsys_private *p;
struct lock_class_key lock_key;

bool force_dma;
bool need_parent_lock;
};

extern int __must_check bus_register(struct bus_type *bus);
@@ -668,9 +673,12 @@ static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
static inline void *devm_kmalloc_array(struct device *dev,
size_t n, size_t size, gfp_t flags)
{
if (size != 0 && n > SIZE_MAX / size)
size_t bytes;

if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
return devm_kmalloc(dev, n * size, flags);

return devm_kmalloc(dev, bytes, flags);
}
static inline void *devm_kcalloc(struct device *dev,
size_t n, size_t size, gfp_t flags)
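check_mul_overflow() (from the new <linux/overflow.h> include) replaces the open-coded SIZE_MAX division above. A userspace sketch of the same pattern, built on the compiler primitive behind it (assumed toolchain: GCC >= 5.1, or clang with __has_builtin, matching the compiler-*.h hunks below):

#include <stdio.h>
#include <stdlib.h>

/* calloc-style allocator: reject n * size if the multiplication wraps.
 * __builtin_mul_overflow() stores n * size into *bytes and returns
 * nonzero when the result overflowed. */
static void *alloc_array(size_t n, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;
	return malloc(bytes);
}

int main(void)
{
	void *ok = alloc_array(16, 32);		/* 512 bytes */
	void *bad = alloc_array((size_t)-1, 2);	/* overflows -> NULL */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}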
@@ -904,6 +912,8 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
* @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
* indicates support for a higher limit in the dma_mask field.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -992,6 +1002,7 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
bool dma_32bit_limit:1;
};

static inline struct device *kobj_to_dev(struct kobject *kobj)

@@ -30,8 +30,6 @@ struct bus_type;

extern void dma_debug_add_bus(struct bus_type *bus);

extern void dma_debug_init(u32 num_entries);

extern int dma_debug_resize_entries(u32 num_entries);

extern void debug_dma_map_page(struct device *dev, struct page *page,
@@ -100,10 +98,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
{
}

static inline void dma_debug_init(u32 num_entries)
{
}

static inline int dma_debug_resize_entries(u32 num_entries)
{
return 0;

@@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
@@ -94,11 +94,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
struct dma_fence_cb *cb);

/**
* struct dma_fence_cb - callback for dma_fence_add_callback
* @node: used by dma_fence_add_callback to append this struct to fence::cb_list
* struct dma_fence_cb - callback for dma_fence_add_callback()
* @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
* @func: dma_fence_func_t to call
*
* This struct will be initialized by dma_fence_add_callback, additional
* This struct will be initialized by dma_fence_add_callback(), additional
* data can be passed along by embedding dma_fence_cb in another struct.
*/
struct dma_fence_cb {
@@ -108,75 +108,143 @@ struct dma_fence_cb {

/**
* struct dma_fence_ops - operations implemented for fence
* @get_driver_name: returns the driver name.
* @get_timeline_name: return the name of the context this fence belongs to.
* @enable_signaling: enable software signaling of fence.
* @signaled: [optional] peek whether the fence is signaled, can be null.
* @wait: custom wait implementation, or dma_fence_default_wait.
* @release: [optional] called on destruction of fence, can be null
* @fill_driver_data: [optional] callback to fill in free-form debug info
* Returns amount of bytes filled, or -errno.
* @fence_value_str: [optional] fills in the value of the fence as a string
* @timeline_value_str: [optional] fills in the current value of the timeline
* as a string
*
* Notes on enable_signaling:
* For fence implementations that have the capability for hw->hw
* signaling, they can implement this op to enable the necessary
* irqs, or insert commands into cmdstream, etc. This is called
* in the first wait() or add_callback() path to let the fence
* implementation know that there is another driver waiting on
* the signal (ie. hw->sw case).
*
* This function can be called from atomic context, but not
* from irq context, so normal spinlocks can be used.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
* signaling. True indicates successful enabling.
*
* fence->error may be set in enable_signaling, but only when false is
* returned.
*
* Calling dma_fence_signal before enable_signaling is called allows
* for a tiny race window in which enable_signaling is called during,
* before, or after dma_fence_signal. To fight this, it is recommended
* that before enable_signaling returns true an extra reference is
* taken on the fence, to be released when the fence is signaled.
* This will mean dma_fence_signal will still be called twice, but
* the second time will be a noop since it was already signaled.
*
* Notes on signaled:
* May set fence->error if returning true.
*
* Notes on wait:
* Must not be NULL, set to dma_fence_default_wait for default implementation.
* The dma_fence_default_wait implementation should work for any fence, as long
* as enable_signaling works correctly.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
*
* Notes on release:
* Can be NULL, this function allows additional commands to run on
* destruction of the fence. Can be called from irq context.
* If pointer is set to NULL, kfree will get called instead.
*/

struct dma_fence_ops {
/**
* @get_driver_name:
*
* Returns the driver name. This is a callback to allow drivers to
* compute the name at runtime, without having it to store permanently
* for each fence, or build a cache of some sort.
*
* This callback is mandatory.
*/
const char * (*get_driver_name)(struct dma_fence *fence);

/**
* @get_timeline_name:
*
* Return the name of the context this fence belongs to. This is a
* callback to allow drivers to compute the name at runtime, without
* having it to store permanently for each fence, or build a cache of
* some sort.
*
* This callback is mandatory.
*/
const char * (*get_timeline_name)(struct dma_fence *fence);

/**
* @enable_signaling:
*
* Enable software signaling of fence.
*
* For fence implementations that have the capability for hw->hw
* signaling, they can implement this op to enable the necessary
* interrupts, or insert commands into cmdstream, etc, to avoid these
* costly operations for the common case where only hw->hw
* synchronization is required. This is called in the first
* dma_fence_wait() or dma_fence_add_callback() path to let the fence
* implementation know that there is another driver waiting on the
* signal (ie. hw->sw case).
*
* This function can be called from atomic context, but not
* from irq context, so normal spinlocks can be used.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
* signaling. True indicates successful enabling.
*
* &dma_fence.error may be set in enable_signaling, but only when false
* is returned.
*
* Since many implementations can call dma_fence_signal() even before
* @enable_signaling has been called there's a race window, where the
* dma_fence_signal() might result in the final fence reference being
* released and its memory freed. To avoid this, implementations of this
* callback should grab their own reference using dma_fence_get(), to be
* released when the fence is signalled (through e.g. the interrupt
* handler).
*
* This callback is mandatory.
*/
bool (*enable_signaling)(struct dma_fence *fence);

/**
* @signaled:
*
* Peek whether the fence is signaled, as a fastpath optimization for
* e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
* callback does not need to make any guarantees beyond that a fence
* once indicated as signalled must always return true from this
* callback. This callback may return false even if the fence has
* completed already, in this case information hasn't propagated through
* the system yet. See also dma_fence_is_signaled().
*
* May set &dma_fence.error if returning true.
*
* This callback is optional.
*/
bool (*signaled)(struct dma_fence *fence);

/**
* @wait:
*
* Custom wait implementation, or dma_fence_default_wait.
*
* Must not be NULL, set to dma_fence_default_wait for default implementation.
* The dma_fence_default_wait implementation should work for any fence, as long
* as enable_signaling works correctly.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
*
* This callback is mandatory.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);

/**
* @release:
*
* Called on destruction of fence to release additional resources.
* Can be called from irq context. This callback is optional. If it is
* NULL, then dma_fence_free() is instead called as the default
* implementation.
*/
void (*release)(struct dma_fence *fence);

/**
* @fill_driver_data:
*
* Callback to fill in free-form debug info.
*
* Returns amount of bytes filled, or negative error on failure.
*
* This callback is optional.
*/
int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);

/**
* @fence_value_str:
*
* Callback to fill in free-form debug info specific to this fence, like
* the sequence number.
*
* This callback is optional.
*/
void (*fence_value_str)(struct dma_fence *fence, char *str, int size);

/**
* @timeline_value_str:
*
* Fills in the current value of the timeline as a string, like the
* sequence number. This should match what @fill_driver_data prints for
* the most recently signalled fence (assuming no delayed signalling).
*/
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
};
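A hedged sketch of the reference-grabbing pattern that the @enable_signaling kerneldoc above recommends; my_fence, my_hw_seqno_passed() and my_hw_arm_irq() are hypothetical driver pieces, while the dma_fence_* calls are the real API:

static bool my_fence_enable_signaling(struct dma_fence *fence)
{
	struct my_fence *f = container_of(fence, struct my_fence, base);

	if (my_hw_seqno_passed(f))
		return false;	/* already signalled, nothing to arm */

	/* Hold an extra reference so a concurrent dma_fence_signal()
	 * cannot drop the last reference while the interrupt is in
	 * flight; the irq handler releases it with dma_fence_put()
	 * after signalling. */
	dma_fence_get(fence);
	my_hw_arm_irq(f);
	return true;
}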
@@ -189,7 +257,7 @@ void dma_fence_free(struct dma_fence *fence);

/**
* dma_fence_put - decreases refcount of the fence
* @fence: [in] fence to reduce refcount of
* @fence: fence to reduce refcount of
*/
static inline void dma_fence_put(struct dma_fence *fence)
{
@@ -199,7 +267,7 @@ static inline void dma_fence_put(struct dma_fence *fence)

/**
* dma_fence_get - increases refcount of the fence
* @fence: [in] fence to increase refcount of
* @fence: fence to increase refcount of
*
* Returns the same fence, with refcount increased by 1.
*/
@@ -213,7 +281,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
/**
* dma_fence_get_rcu - get a fence from a reservation_object_list with
* rcu read lock
* @fence: [in] fence to increase refcount of
* @fence: fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
*/
@@ -227,7 +295,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)

/**
* dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
* @fencep: [in] pointer to fence to increase refcount of
* @fencep: pointer to fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
@@ -289,14 +357,16 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
* @fence: [in] the fence to check
* @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
* true if dma_fence_add_callback, dma_fence_wait or
* dma_fence_enable_sw_signaling haven't been called before.
* true if dma_fence_add_callback(), dma_fence_wait() or
* dma_fence_enable_sw_signaling() haven't been called before.
*
* This function requires fence->lock to be held.
* This function requires &dma_fence.lock to be held.
*
* See also dma_fence_is_signaled().
*/
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
@@ -314,17 +384,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)

/**
* dma_fence_is_signaled - Return an indication if the fence is signaled yet.
* @fence: [in] the fence to check
* @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
* true if dma_fence_add_callback, dma_fence_wait or
* dma_fence_enable_sw_signaling haven't been called before.
* true if dma_fence_add_callback(), dma_fence_wait() or
* dma_fence_enable_sw_signaling() haven't been called before.
*
* It's recommended for seqno fences to call dma_fence_signal when the
* operation is complete, it makes it possible to prevent issues from
* wraparound between time of issue and time of use by checking the return
* value of this function before calling hardware-specific wait instructions.
*
* See also dma_fence_is_signaled_locked().
*/
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
@@ -342,8 +414,8 @@ dma_fence_is_signaled(struct dma_fence *fence)

/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence's seqno
* @f2: [in] the second fence's seqno from the same context
* @f1: the first fence's seqno
* @f2: the second fence's seqno from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
@@ -355,8 +427,8 @@ static inline bool __dma_fence_is_later(u32 f1, u32 f2)

/**
* dma_fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
* @f1: the first fence from the same context
* @f2: the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
@@ -372,8 +444,8 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,

/**
* dma_fence_later - return the chronologically later fence
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
* @f1: the first fence from the same context
* @f2: the second fence from the same context
*
* Returns NULL if both fences are signaled, otherwise the fence that would be
* signaled last. Both fences must be from the same context, since a seqno is
@@ -398,7 +470,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,

/**
* dma_fence_get_status_locked - returns the status upon completion
* @fence: [in] the dma_fence to query
* @fence: the dma_fence to query
*
* Drivers can supply an optional error status condition before they signal
* the fence (to indicate whether the fence was completed due to an error
@@ -422,8 +494,8 @@ int dma_fence_get_status(struct dma_fence *fence);

/**
* dma_fence_set_error - flag an error condition on the fence
* @fence: [in] the dma_fence
* @error: [in] the error to store
* @fence: the dma_fence
* @error: the error to store
*
* Drivers can supply an optional error status condition before they signal
* the fence, to indicate that the fence was completed due to an error
@@ -449,8 +521,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,

/**
* dma_fence_wait - sleep until the fence gets signaled
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
* @fence: the fence to wait on
* @intr: if true, do an interruptible wait
*
* This function will return -ERESTARTSYS if interrupted by a signal,
* or 0 if the fence was signaled. Other error values may be
@@ -459,6 +531,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
* Performs a synchronous wait on this fence. It is assumed the caller
* directly or indirectly holds a reference to the fence, otherwise the
* fence might be freed before return, resulting in undefined behavior.
*
* See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
*/
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{

@@ -17,6 +17,7 @@
#define __DMA_IOMMU_H

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/errno.h>

#ifdef CONFIG_IOMMU_DMA
@@ -133,10 +133,10 @@ struct dma_map_ops {
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
#endif
int is_phys;
};

extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -502,7 +502,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#define arch_dma_alloc_attrs(dev) (true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
@@ -521,7 +521,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
/* let the implementation decide on the zone to allocate from: */
flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

if (!arch_dma_alloc_attrs(&dev, &flag))
if (!arch_dma_alloc_attrs(&dev))
return NULL;
if (!ops->alloc)
return NULL;
@@ -572,14 +572,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}

/*
* This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
* don't use this in new code.
*/
#ifndef arch_dma_supported
#define arch_dma_supported(dev, mask) (1)
#endif

static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -592,9 +584,6 @@ static inline int dma_supported(struct device *dev, u64 mask)

if (!ops)
return 0;
if (!arch_dma_supported(dev, mask))
return 0;

if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
@@ -839,7 +828,7 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)

include/linux/dma-noncoherent.h (new file)
@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_DMA_NONCOHERENT_MMAP
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
#else
#define arch_dma_mmap NULL
#endif /* CONFIG_DMA_NONCOHERENT_MMAP */

#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#else
#define arch_dma_cache_sync NULL
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(struct device *dev,
phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(struct device *dev,
phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#endif /* _LINUX_DMA_NONCOHERENT_H */
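A hedged sketch of what an architecture port supplies behind CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE; my_cache_wback() stands in for the port's real cache primitive, phys_to_virt() is the usual kernel helper:

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* Before the device reads the buffer, write dirty CPU cache
	 * lines back to memory; paddr is physical, so translate it
	 * to a kernel virtual address first. */
	my_cache_wback(phys_to_virt(paddr), size);
}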

include/linux/dma/sprd-dma.h (new file)
@@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _SPRD_DMA_H_
#define _SPRD_DMA_H_

#define SPRD_DMA_REQ_SHIFT 16
#define SPRD_DMA_FLAGS(req_mode, int_type) \
((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))

/*
* enum sprd_dma_req_mode: define the DMA request mode
* @SPRD_DMA_FRAG_REQ: fragment request mode
* @SPRD_DMA_BLK_REQ: block request mode
* @SPRD_DMA_TRANS_REQ: transaction request mode
* @SPRD_DMA_LIST_REQ: link-list request mode
*
* We have 4 types request mode: fragment mode, block mode, transaction mode
* and linklist mode. One transaction can contain several blocks, one block can
* contain several fragments. Link-list mode means we can save several DMA
* configuration into one reserved memory, then DMA can fetch each DMA
* configuration automatically to start transfer.
*/
enum sprd_dma_req_mode {
SPRD_DMA_FRAG_REQ,
SPRD_DMA_BLK_REQ,
SPRD_DMA_TRANS_REQ,
SPRD_DMA_LIST_REQ,
};

/*
* enum sprd_dma_int_type: define the DMA interrupt type
* @SPRD_DMA_NO_INT: do not need generate DMA interrupts.
* @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
* is done.
* @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
* @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
* or one block request is done.
* @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
* request is done.
* @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
* transaction request or fragment request is done.
* @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
* transaction request or block request is done.
* @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
* is done.
* @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
* incorrect.
*/
enum sprd_dma_int_type {
SPRD_DMA_NO_INT,
SPRD_DMA_FRAG_INT,
SPRD_DMA_BLK_INT,
SPRD_DMA_BLK_FRAG_INT,
SPRD_DMA_TRANS_INT,
SPRD_DMA_TRANS_FRAG_INT,
SPRD_DMA_TRANS_BLK_INT,
SPRD_DMA_LIST_INT,
SPRD_DMA_CFGERR_INT,
};

#endif
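Illustrative use of the flag encoding above (a sketch, not taken from a driver): the request mode lives above SPRD_DMA_REQ_SHIFT and the interrupt type in the low bits, so one word carries both:

u32 flags = SPRD_DMA_FLAGS(SPRD_DMA_BLK_REQ, SPRD_DMA_BLK_INT);

/* decoding the two halves back out */
enum sprd_dma_req_mode mode = flags >> SPRD_DMA_REQ_SHIFT;		/* SPRD_DMA_BLK_REQ */
enum sprd_dma_int_type type = flags & ((1 << SPRD_DMA_REQ_SHIFT) - 1);	/* SPRD_DMA_BLK_INT */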
@@ -397,7 +397,7 @@ typedef struct {
u32 set_bar_attributes;
u64 romsize;
u32 romimage;
} efi_pci_io_protocol_32;
} efi_pci_io_protocol_32_t;

typedef struct {
u64 poll_mem;
@@ -417,7 +417,7 @@ typedef struct {
u64 set_bar_attributes;
u64 romsize;
u64 romimage;
} efi_pci_io_protocol_64;
} efi_pci_io_protocol_64_t;

typedef struct {
void *poll_mem;
@@ -437,7 +437,7 @@ typedef struct {
void *set_bar_attributes;
uint64_t romsize;
void *romimage;
} efi_pci_io_protocol;
} efi_pci_io_protocol_t;

#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002

@@ -218,8 +218,6 @@ extern void elv_unregister(struct elevator_type *);
extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct request_queue *, struct elevator_queue *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
struct elevator_type *);

@@ -312,6 +312,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* by kernel. Returns a negative error code or zero.
* @get_fecparam: Get the network device Forward Error Correction parameters.
* @set_fecparam: Set the network device Forward Error Correction parameters.
* @get_ethtool_phy_stats: Return extended statistics about the PHY device.
* This is only useful if the device maintains PHY statistics and
* cannot use the standard PHY library helpers.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -407,5 +410,7 @@ struct ethtool_ops {
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
struct ethtool_fecparam *);
void (*get_ethtool_phy_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
};
#endif /* _LINUX_ETHTOOL_H */

@@ -10,14 +10,8 @@
* hackers place grumpy comments in header files.
*/

/* Some toolchains use a `_' prefix for all user symbols. */
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
#define __VMLINUX_SYMBOL(x) _##x
#define __VMLINUX_SYMBOL_STR(x) "_" #x
#else
#define __VMLINUX_SYMBOL(x) x
#define __VMLINUX_SYMBOL_STR(x) #x
#endif

/* Indirect, so macros are expanded before pasting. */
#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
@@ -46,14 +40,14 @@ extern struct module __this_module;
#if defined(CONFIG_MODULE_REL_CRCS)
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
" .weak __crc_" #sym " \n" \
" .long __crc_" #sym " - . \n" \
" .previous \n");
#else
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .weak __crc_" #sym " \n" \
" .long __crc_" #sym " \n" \
" .previous \n");
#endif
#else
@@ -66,7 +60,7 @@ extern struct module __this_module;
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
= #sym; \
static const struct kernel_symbol __ksymtab_##sym \
__used \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
@@ -30,6 +30,7 @@ struct sock;
|
||||
struct seccomp_data;
|
||||
struct bpf_prog_aux;
|
||||
struct xdp_rxq_info;
|
||||
struct xdp_buff;
|
||||
|
||||
/* ArgX, context and stack frame pointer register positions. Note,
|
||||
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
|
||||
@@ -46,7 +47,9 @@ struct xdp_rxq_info;

/* Additional register mappings for converted user programs. */
#define BPF_REG_A BPF_REG_0
#define BPF_REG_X BPF_REG_7
#define BPF_REG_TMP BPF_REG_8
#define BPF_REG_TMP BPF_REG_2 /* scratch reg */
#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
@@ -286,8 +289,21 @@ struct xdp_rxq_info;
.off = OFF, \
.imm = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
.dst_reg = 0, \
.src_reg = BPF_PSEUDO_CALL, \
.off = 0, \
.imm = TGT })

/* Function call */

#define BPF_CAST_CALL(x) \
((u64 (*)(u64, u64, u64, u64, u64))(x))

#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
@@ -467,7 +483,8 @@ struct bpf_prog {
dst_needed:1, /* Do we need dst entry? */
blinded:1, /* Was blinded */
is_func:1, /* program is a bpf function */
kprobe_override:1; /* Do we override a kprobe? */
kprobe_override:1, /* Do we override a kprobe? */
has_callchain_buf:1; /* callchain buffer allocated? */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@@ -500,14 +517,6 @@ struct bpf_skb_data_end {
void *data_end;
};

struct xdp_buff {
void *data;
void *data_end;
void *data_meta;
void *data_hard_start;
struct xdp_rxq_info *rxq;
};

struct sk_msg_buff {
void *data;
void *data_end;
@@ -519,9 +528,9 @@ struct sk_msg_buff {
int sg_end;
struct scatterlist sg_data[MAX_SKB_FRAGS];
bool sg_copy[MAX_SKB_FRAGS];
__u32 key;
__u32 flags;
struct bpf_map *map;
struct sock *sk_redir;
struct sock *sk;
struct sk_buff *skb;
struct list_head list;
};
@@ -630,16 +639,34 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
bool off_ok;
const u32 size_machine = sizeof(unsigned long);

if (size > size_machine && size % size_machine == 0)
size = size_machine;

return size;
}

static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
u32 size_default)
{
size_default = bpf_ctx_off_adjust_machine(size_default);
size_access = bpf_ctx_off_adjust_machine(size_access);

#ifdef __LITTLE_ENDIAN
off_ok = (off & (size_default - 1)) == 0;
return (off & (size_default - 1)) == 0;
#else
off_ok = (off & (size_default - 1)) + size == size_default;
return (off & (size_default - 1)) + size_access == size_default;
#endif
return off_ok && size <= size_default && (size & (size - 1)) == 0;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
return bpf_ctx_narrow_align_ok(off, size, size_default) &&
size <= size_default && (size & (size - 1)) == 0;
}
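A worked example of the check above (an editor's sketch, not part of the diff): on a little-endian host the rule reduces to "the narrow load must start at the beginning of the field", while on big-endian it must end flush with it. A minimal userspace rendering of the little-endian branch:

#include <assert.h>
#include <stdint.h>

/* Mirrors the __LITTLE_ENDIAN branch of bpf_ctx_narrow_align_ok() for
 * size_default values no larger than the machine word. */
static int narrow_align_ok(uint32_t off, uint32_t size_default)
{
return (off & (size_default - 1)) == 0;
}

int main(void)
{
assert(narrow_align_ok(0, 4)); /* byte 0 of a u32 field: accepted */
assert(!narrow_align_ok(2, 4)); /* byte 2 of a u32 field: rejected */
return 0;
}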

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
@@ -766,27 +793,12 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *prog);
struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
void xdp_do_flush_map(void);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
return unlikely(xdp->data_meta > xdp->data);
}

void bpf_warn_invalid_xdp_action(u32 act);

struct sock *do_sk_redirect_map(struct sk_buff *skb);
@@ -1029,6 +1041,7 @@ struct bpf_sock_addr_kern {
 * only two (src and dst) are available at convert_ctx_access time
 */
u64 tmp_reg;
void *t_ctx; /* Attach type specific context. */
};

struct bpf_sock_ops_kern {

@@ -42,6 +42,8 @@ struct builtin_fw {
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_nowarn(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -59,6 +61,14 @@ static inline int request_firmware(const struct firmware **fw,
{
return -EINVAL;
}

static inline int firmware_request_nowarn(const struct firmware **fw,
const char *name,
struct device *device)
{
return -EINVAL;
}

static inline int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,

@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Driver for Altera Partial Reconfiguration IP Core
 *
@@ -5,18 +6,6 @@
 *
 * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
 *  by Alan Tull <atull@opensource.altera.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _ALT_PR_IP_CORE_H

@@ -62,8 +62,11 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);

int fpga_bridge_register(struct device *dev, const char *name,
const struct fpga_bridge_ops *br_ops, void *priv);
void fpga_bridge_unregister(struct device *dev);
struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
const struct fpga_bridge_ops *br_ops,
void *priv);
void fpga_bridge_free(struct fpga_bridge *br);
int fpga_bridge_register(struct fpga_bridge *br);
void fpga_bridge_unregister(struct fpga_bridge *br);

#endif /* _LINUX_FPGA_BRIDGE_H */

@@ -1,20 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * FPGA Framework
 *
 * Copyright (C) 2013-2016 Altera Corporation
 * Copyright (C) 2017 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _LINUX_FPGA_MGR_H
#define _LINUX_FPGA_MGR_H
@@ -170,9 +159,11 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);

void fpga_mgr_put(struct fpga_manager *mgr);

int fpga_mgr_register(struct device *dev, const char *name,
const struct fpga_manager_ops *mops, void *priv);

void fpga_mgr_unregister(struct device *dev);
struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
const struct fpga_manager_ops *mops,
void *priv);
void fpga_mgr_free(struct fpga_manager *mgr);
int fpga_mgr_register(struct fpga_manager *mgr);
void fpga_mgr_unregister(struct fpga_manager *mgr);

#endif /*_LINUX_FPGA_MGR_H */

@@ -1,3 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _FPGA_REGION_H
#define _FPGA_REGION_H

@@ -14,7 +16,6 @@
 * @info: FPGA image info
 * @priv: private data
 * @get_bridges: optional function to get bridges to a list
 * @groups: optional attribute groups.
 */
struct fpga_region {
struct device dev;
@@ -24,7 +25,6 @@ struct fpga_region {
struct fpga_image_info *info;
void *priv;
int (*get_bridges)(struct fpga_region *region);
const struct attribute_group **groups;
};

#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
@@ -34,7 +34,12 @@ struct fpga_region *fpga_region_class_find(
int (*match)(struct device *, const void *));

int fpga_region_program_fpga(struct fpga_region *region);
int fpga_region_register(struct device *dev, struct fpga_region *region);
int fpga_region_unregister(struct fpga_region *region);

struct fpga_region
*fpga_region_create(struct device *dev, struct fpga_manager *mgr,
int (*get_bridges)(struct fpga_region *));
void fpga_region_free(struct fpga_region *region);
int fpga_region_register(struct fpga_region *region);
void fpga_region_unregister(struct fpga_region *region);

#endif /* _FPGA_REGION_H */

@@ -36,6 +36,7 @@
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <linux/errseq.h>
#include <linux/ioprio.h>

#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -94,7 +95,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,

/*
 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
 * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open()
 */

/* file is open for reading */
@@ -206,9 +207,9 @@ struct iattr {
kuid_t ia_uid;
kgid_t ia_gid;
loff_t ia_size;
struct timespec ia_atime;
struct timespec ia_mtime;
struct timespec ia_ctime;
struct timespec64 ia_atime;
struct timespec64 ia_mtime;
struct timespec64 ia_ctime;

/*
 * Not an attribute, but an auxiliary info for filesystems wanting to
@@ -299,7 +300,8 @@ struct kiocb {
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
void *private;
int ki_flags;
enum rw_hint ki_hint;
u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
} __randomize_layout;

static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -602,9 +604,9 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
struct timespec i_atime;
struct timespec i_mtime;
struct timespec i_ctime;
struct timespec64 i_atime;
struct timespec64 i_mtime;
struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
unsigned int i_blkbits;
@@ -1091,7 +1093,7 @@ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern void lease_get_mtime(struct inode *, struct timespec64 *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
@@ -1206,7 +1208,8 @@ static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned
return 0;
}

static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
static inline void lease_get_mtime(struct inode *inode,
struct timespec64 *time)
{
return;
}
@@ -1250,7 +1253,7 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
}

struct fasync_struct {
spinlock_t fa_lock;
rwlock_t fa_lock;
int magic;
int fa_fd;
struct fasync_struct *fa_next; /* singly linked list */
@@ -1364,9 +1367,9 @@ struct super_block {
void *s_security;
#endif
const struct xattr_handler **s_xattr;

#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
const struct fscrypt_operations *s_cop;

#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1476,7 +1479,8 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
}

extern struct timespec current_time(struct inode *inode);
extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran);
extern struct timespec64 current_time(struct inode *inode);

/*
 * Snapshotting support.
@@ -1597,6 +1601,11 @@ static inline void sb_start_intwrite(struct super_block *sb)
__sb_start_write(sb, SB_FREEZE_FS, true);
}

static inline int sb_start_intwrite_trylock(struct super_block *sb)
{
return __sb_start_write(sb, SB_FREEZE_FS, false);
}

extern bool inode_owner_or_capable(const struct inode *inode);

@@ -1711,6 +1720,8 @@ struct file_operations {
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
__poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1764,7 +1775,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
int (*update_time)(struct inode *, struct timespec *, int);
int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode, int *opened);
@@ -1927,12 +1938,22 @@ static inline enum rw_hint file_write_hint(struct file *file)

static inline int iocb_flags(struct file *file);

static inline u16 ki_hint_validate(enum rw_hint hint)
{
typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;

if (hint <= max_hint)
return hint;
return 0;
}

static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
.ki_hint = file_write_hint(filp),
.ki_hint = ki_hint_validate(file_write_hint(filp)),
.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0),
};
}
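To see what ki_hint_validate() buys (an editor's sketch, not from the header): ki_hint is now a u16, so max_hint = -1 wraps to 0xffff and any wider enum rw_hint value is silently replaced by 0 rather than being truncated to a bogus hint:

#include <assert.h>
#include <stdint.h>

/* Userspace mirror of ki_hint_validate(). */
static uint16_t hint_validate(uint64_t hint)
{
uint16_t max_hint = -1; /* 0xffff */

return hint <= max_hint ? (uint16_t)hint : 0;
}

int main(void)
{
assert(hint_validate(3) == 3); /* small hints pass through */
assert(hint_validate(0x10000) == 0); /* oversized hints are dropped */
return 0;
}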

@@ -2198,7 +2219,7 @@ extern int current_umask(void);

extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern int generic_update_time(struct inode *, struct timespec *, int);
extern int generic_update_time(struct inode *, struct timespec64 *, int);

/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2570,7 +2591,7 @@ extern bool is_bad_inode(struct inode *);

#ifdef CONFIG_BLOCK
extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
struct block_device *bdev, bool verbose);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
@@ -2879,6 +2900,10 @@ extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);

extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *),
void *data);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,

@@ -25,6 +25,10 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
}

/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}

static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
{
@@ -64,16 +68,6 @@ static inline void fscrypt_restore_control_page(struct page *page)
return;
}

static inline void fscrypt_set_d_op(struct dentry *dentry)
{
return;
}

static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
{
return;
}

/* policy.c */
static inline int fscrypt_ioctl_set_policy(struct file *filp,
const void __user *arg)
@@ -160,10 +154,13 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}

/* bio.c */
static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
struct bio *bio)
static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}

static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio)
{
return;
}

static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)

@@ -29,7 +29,7 @@ struct fscrypt_operations {
int (*set_context)(struct inode *, const void *, size_t, void *);
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned (*max_namelen)(struct inode *);
unsigned int max_namelen;
};

struct fscrypt_ctx {
@@ -59,6 +59,7 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
}

/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -74,20 +75,6 @@ static inline struct page *fscrypt_control_page(struct page *page)

extern void fscrypt_restore_control_page(struct page *);

extern const struct dentry_operations fscrypt_d_ops;

static inline void fscrypt_set_d_op(struct dentry *dentry)
{
d_set_d_op(dentry, &fscrypt_d_ops);
}

static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
spin_unlock(&dentry->d_lock);
}

/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
@@ -188,7 +175,9 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}

/* bio.c */
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_decrypt_bio(struct bio *);
extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);

include/linux/fsl/ptp_qoriq.h | 141 lines (new file)
@@ -0,0 +1,141 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 OMICRON electronics GmbH
 * Copyright 2018 NXP
 */
#ifndef __PTP_QORIQ_H__
#define __PTP_QORIQ_H__

#include <linux/io.h>
#include <linux/ptp_clock_kernel.h>

/*
 * qoriq ptp registers
 * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
 */
struct qoriq_ptp_registers {
u32 tmr_ctrl; /* Timer control register */
u32 tmr_tevent; /* Timestamp event register */
u32 tmr_temask; /* Timer event mask register */
u32 tmr_pevent; /* Timestamp event register */
u32 tmr_pemask; /* Timer event mask register */
u32 tmr_stat; /* Timestamp status register */
u32 tmr_cnt_h; /* Timer counter high register */
u32 tmr_cnt_l; /* Timer counter low register */
u32 tmr_add; /* Timer drift compensation addend register */
u32 tmr_acc; /* Timer accumulator register */
u32 tmr_prsc; /* Timer prescale */
u8 res1[4];
u32 tmroff_h; /* Timer offset high */
u32 tmroff_l; /* Timer offset low */
u8 res2[8];
u32 tmr_alarm1_h; /* Timer alarm 1 high register */
u32 tmr_alarm1_l; /* Timer alarm 1 high register */
u32 tmr_alarm2_h; /* Timer alarm 2 high register */
u32 tmr_alarm2_l; /* Timer alarm 2 high register */
u8 res3[48];
u32 tmr_fiper1; /* Timer fixed period interval */
u32 tmr_fiper2; /* Timer fixed period interval */
u32 tmr_fiper3; /* Timer fixed period interval */
u8 res4[20];
u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
};

/* Bit definitions for the TMR_CTRL register */
#define ALM1P (1<<31) /* Alarm1 output polarity */
#define ALM2P (1<<30) /* Alarm2 output polarity */
#define FIPERST (1<<28) /* FIPER start indication */
#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
#define TCLK_PERIOD_MASK (0x3ff)
#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
#define FRD (1<<14) /* FIPER Realignment Disable */
#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
#define COPH (1<<7) /* Generated clock output phase. */
#define CIPH (1<<6) /* External oscillator input clock phase */
#define TMSR (1<<5) /* Timer soft reset. */
#define BYP (1<<3) /* Bypass drift compensated clock */
#define TE (1<<2) /* 1588 timer enable. */
#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
#define CKSEL_MASK (0x3)

/* Bit definitions for the TMR_TEVENT register */
#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
#define ALM2 (1<<17) /* Current time = alarm time register 2 */
#define ALM1 (1<<16) /* Current time = alarm time register 1 */
#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */

/* Bit definitions for the TMR_TEMASK register */
#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
#define ALM2EN (1<<17) /* Timer ALM2 event enable */
#define ALM1EN (1<<16) /* Timer ALM1 event enable */
#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
#define PP2EN (1<<6) /* Periodic pulse event 2 enable */

/* Bit definitions for the TMR_PEVENT register */
#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
#define RXP (1<<0) /* PTP frame has been received */

/* Bit definitions for the TMR_PEMASK register */
#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
#define RXPEN (1<<0) /* Receive PTP packet event enable */

/* Bit definitions for the TMR_STAT register */
#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
#define STAT_VEC_MASK (0x3f)

/* Bit definitions for the TMR_PRSC register */
#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
#define PRSC_OCK_MASK (0xffff)

#define DRIVER "ptp_qoriq"
#define DEFAULT_CKSEL 1
#define N_EXT_TS 2
#define REG_SIZE sizeof(struct qoriq_ptp_registers)

struct qoriq_ptp {
struct qoriq_ptp_registers __iomem *regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
int irq;
int phc_index;
u64 alarm_interval; /* for periodic alarm */
u64 alarm_value;
u32 tclk_period; /* nanoseconds */
u32 tmr_prsc;
u32 tmr_add;
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
};

static inline u32 qoriq_read(unsigned __iomem *addr)
{
u32 val;

val = ioread32be(addr);
return val;
}

static inline void qoriq_write(unsigned __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}

#endif
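As a usage note (an editor's sketch, not part of the new file; the qoriq_ptp pointer is assumed driver context): the accessors wrap big-endian MMIO, so a read-modify-write of a timer register is done under the lock that guards regs:

unsigned long flags;

spin_lock_irqsave(&qoriq_ptp->lock, flags);
/* e.g. set the TMR_CTRL TE bit to enable the 1588 timer */
qoriq_write(&qoriq_ptp->regs->tmr_ctrl,
qoriq_read(&qoriq_ptp->regs->tmr_ctrl) | TE);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
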
@@ -98,8 +98,6 @@ struct fsnotify_iter_info;
struct fsnotify_ops {
int (*handle_event)(struct fsnotify_group *group,
struct inode *inode,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
const unsigned char *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
@@ -201,6 +199,57 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2

enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};

#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)

struct fsnotify_iter_info {
struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
unsigned int report_mask;
int srcu_idx;
};

static inline bool fsnotify_iter_should_report_type(
struct fsnotify_iter_info *iter_info, int type)
{
return (iter_info->report_mask & (1U << type));
}

static inline void fsnotify_iter_set_report_type(
struct fsnotify_iter_info *iter_info, int type)
{
iter_info->report_mask |= (1U << type);
}

static inline void fsnotify_iter_set_report_type_mark(
struct fsnotify_iter_info *iter_info, int type,
struct fsnotify_mark *mark)
{
iter_info->marks[type] = mark;
iter_info->report_mask |= (1U << type);
}

#define FSNOTIFY_ITER_FUNCS(name, NAME) \
static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
struct fsnotify_iter_info *iter_info) \
{ \
return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
}

FSNOTIFY_ITER_FUNCS(inode, INODE)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)

#define fsnotify_foreach_obj_type(type) \
for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
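To make the macro machinery concrete (an editor's sketch, not part of the diff): FSNOTIFY_ITER_FUNCS(inode, INODE) expands to an fsnotify_iter_inode_mark() accessor, and a backend can combine the helpers above to visit every object type an event should be reported for:

static void example_walk_marks(struct fsnotify_iter_info *iter_info)
{
int type;

fsnotify_foreach_obj_type(type) {
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
/* iter_info->marks[type] holds the mark for this object type */
}
}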

/*
 * Inode / vfsmount point to this structure which tracks all marks attached to
 * the inode / vfsmount. The reference to inode / vfsmount is held by this
@@ -209,11 +258,7 @@ struct fsnotify_group {
 */
struct fsnotify_mark_connector {
spinlock_t lock;
#define FSNOTIFY_OBJ_TYPE_INODE 0x01
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02
#define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \
FSNOTIFY_OBJ_TYPE_VFSMOUNT)
unsigned int flags; /* Type of object [lock] */
unsigned int type; /* Type of object [lock] */
union { /* Object pointer [lock] */
struct inode *inode;
struct vfsmount *mnt;
@@ -356,7 +401,21 @@ extern struct fsnotify_mark *fsnotify_find_mark(
extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
struct vfsmount *mnt, int allow_dups);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
struct inode *inode, struct vfsmount *mnt,
int allow_dups);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
int allow_dups)
{
return fsnotify_add_mark(mark, inode, NULL, allow_dups);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int allow_dups)
{
return fsnotify_add_mark_locked(mark, inode, NULL, allow_dups);
}
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
@@ -369,12 +428,12 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned
/* run all the marks in a group, and clear all of the vfsmount marks */
static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
}
/* run all the marks in a group, and clear all of the inode marks */
static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H

@@ -24,6 +24,7 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_WRITE 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
@@ -36,11 +37,10 @@ struct vm_area_struct;
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_WRITE 0x800000u
#define ___GFP_KSWAPD_RECLAIM 0x1000000u
#define ___GFP_DIRECT_RECLAIM 0x200000u
#define ___GFP_KSWAPD_RECLAIM 0x400000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP 0x2000000u
#define ___GFP_NOLOCKDEP 0x800000u
#else
#define ___GFP_NOLOCKDEP 0
#endif
@@ -205,7 +205,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
@@ -343,7 +343,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 * 0x1 => DMA or NORMAL
 * 0x2 => HIGHMEM or NORMAL
 * 0x3 => BAD (DMA+HIGHMEM)
 * 0x4 => DMA32 or DMA or NORMAL
 * 0x4 => DMA32 or NORMAL
 * 0x5 => BAD (DMA+DMA32)
 * 0x6 => BAD (HIGHMEM+DMA32)
 * 0x7 => BAD (HIGHMEM+DMA32+DMA)
@@ -351,7 +351,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 * 0x9 => DMA or NORMAL (MOVABLE+DMA)
 * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
 * 0xb => BAD (MOVABLE+HIGHMEM+DMA)
 * 0xc => DMA32 (MOVABLE+DMA32)
 * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
 * 0xd => BAD (MOVABLE+DMA32+DMA)
 * 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
 * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)

@@ -116,7 +116,7 @@ int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value(unsigned int array_size,
int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);

@@ -134,7 +134,7 @@ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);

@@ -369,12 +369,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_raw_array_value(unsigned int array_size,
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}

static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
@@ -423,12 +424,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}

static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)

@@ -39,6 +39,23 @@ struct gpiod_lookup_table {
struct gpiod_lookup table[];
};

/**
 * struct gpiod_hog - GPIO line hog table
 * @chip_label: name of the chip the GPIO belongs to
 * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
 * @line_name: consumer name for the hogged line
 * @lflags: mask of GPIO lookup flags
 * @dflags: GPIO flags used to specify the direction and value
 */
struct gpiod_hog {
struct list_head list;
const char *chip_label;
u16 chip_hwnum;
const char *line_name;
enum gpio_lookup_flags lflags;
int dflags;
};

/*
 * Simple definition of a single GPIO under a con_id
 */
@@ -59,10 +76,23 @@ struct gpiod_lookup_table {
.flags = _flags, \
}

/*
 * Simple definition of a single GPIO hog in an array.
 */
#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
{ \
.chip_label = _chip_label, \
.chip_hwnum = _chip_hwnum, \
.line_name = _line_name, \
.lflags = _lflags, \
.dflags = _dflags, \
}
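For context (an editor's sketch, not part of the diff; the chip label and line name are hypothetical): a board file declares hogs with the macro above and registers them during early init, and gpiod_add_hogs() walks the array until it finds an entry with a NULL chip_label:

static struct gpiod_hog example_board_hogs[] = {
GPIO_HOG("gpiochip0", 3, "vbus-enable", GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH),
{ } /* terminating entry */
};

static void __init example_board_init(void)
{
gpiod_add_hogs(example_board_hogs);
}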

#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_hogs(struct gpiod_hog *hogs);
#else
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -70,6 +100,7 @@ static inline
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
#endif

#endif /* __LINUX_GPIO_MACHINE_H */

@@ -292,9 +292,12 @@ struct hid_item {
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
#define HID_DG_SCANTIME 0x000d0056
#define HID_DG_SURFACESWITCH 0x000d0057
#define HID_DG_BUTTONSWITCH 0x000d0058
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
#define HID_DG_LATENCYMODE 0x000d0060

#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
@@ -341,10 +344,12 @@ struct hid_item {
/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL BIT(10)
#define HID_QUIRK_INPUT_PER_APP BIT(11)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -367,6 +372,7 @@ struct hid_item {
#define HID_GROUP_RMI 0x0100
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
#define HID_GROUP_STEAM 0x0103

/*
 * HID protocol status
@@ -463,8 +469,10 @@ struct hid_field {

struct hid_report {
struct list_head list;
unsigned id; /* id of this report */
unsigned type; /* report type */
struct list_head hidinput_list;
unsigned int id; /* id of this report */
unsigned int type; /* report type */
unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
@@ -502,12 +510,15 @@ struct hid_output_fifo {

#define HID_STAT_ADDED BIT(0)
#define HID_STAT_PARSED BIT(1)
#define HID_STAT_DUP_DETECTED BIT(2)

struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
const char *name;
bool registered;
struct list_head reports; /* the list of reports */
};

enum hid_type {
@@ -864,7 +875,9 @@ void hid_output_report(struct hid_report *report, __u8 *data);
void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
struct hid_report *hid_register_report(struct hid_device *device,
unsigned int type, unsigned int id,
unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
unsigned int type, unsigned int id,

@@ -16,7 +16,7 @@

/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.txt for reasons and overview of what HMM is and it
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it
 * is for. Here we focus on the HMM API description, with some explanation of
 * the underlying implementation.
 *
@@ -522,9 +522,7 @@ void hmm_devmem_remove(struct hmm_devmem *devmem);
static inline void hmm_devmem_page_set_drvdata(struct page *page,
unsigned long data)
{
unsigned long *drvdata = (unsigned long *)&page->pgmap;

drvdata[1] = data;
page->hmm_data = data;
}

/*
@@ -535,9 +533,7 @@ static inline void hmm_devmem_page_set_drvdata(struct page *page,
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
const unsigned long *drvdata = (const unsigned long *)&page->pgmap;

return drvdata[1];
return page->hmm_data;
}

@@ -192,13 +192,6 @@ struct host1x_reloc {
unsigned long shift;
};

struct host1x_waitchk {
struct host1x_bo *bo;
u32 offset;
u32 syncpt_id;
u32 thresh;
};

struct host1x_job {
/* When refcount goes to zero, job can be freed */
struct kref ref;
@@ -209,19 +202,15 @@ struct host1x_job {
/* Channel where job is submitted to */
struct host1x_channel *channel;

u32 client;
/* client where the job originated */
struct host1x_client *client;

/* Gathers and their memory */
struct host1x_job_gather *gathers;
unsigned int num_gathers;

/* Wait checks to be processed at submit time */
struct host1x_waitchk *waitchk;
unsigned int num_waitchk;
u32 waitchk_mask;

/* Array of handles to be pinned & unpinned */
struct host1x_reloc *relocarray;
struct host1x_reloc *relocs;
unsigned int num_relocs;
struct host1x_job_unpin_data *unpins;
unsigned int num_unpins;
@@ -261,10 +250,9 @@ struct host1x_job {
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs,
u32 num_waitchks);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
u32 words, u32 offset);
u32 num_cmdbufs, u32 num_relocs);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);

@@ -1,18 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __LINUX_HWSPINLOCK_H
@@ -24,6 +16,7 @@
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW 0x03

struct device;
struct device_node;
@@ -175,6 +168,25 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the code that takes the hardware lock
 * with a mutex or spinlock of its own to avoid deadlock; that is also what
 * makes it safe to perform time-consuming or sleepable operations while
 * holding the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
@@ -242,6 +254,29 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @timeout msecs have elapsed.
 *
 * Caution: the caller must protect the code that takes the hardware lock
 * with a mutex or spinlock of its own to avoid deadlock; that is also what
 * makes it safe to perform time-consuming or sleepable operations while
 * holding the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @timeout msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
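A driver-side sketch of the raw API (an editor's illustration; the mutex and function names are hypothetical): in HWLOCK_RAW mode the hwspinlock core takes no local lock, so the caller serializes local access itself before touching the hardware lock:

static DEFINE_MUTEX(example_sw_lock);

static int example_update_shared_mem(struct hwspinlock *hwlock)
{
int ret;

mutex_lock(&example_sw_lock); /* serialize local callers first */
ret = hwspin_lock_timeout_raw(hwlock, 100);
if (!ret) {
/* ... touch memory shared with the remote processor ... */
hwspin_unlock_raw(hwlock);
}
mutex_unlock(&example_sw_lock);
return ret;
}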

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
@@ -301,6 +336,21 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock

@@ -35,6 +35,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>

#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -120,6 +121,7 @@ struct hv_ring_buffer {
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
struct reciprocal_value ring_size_div10_reciprocal;
spinlock_t ring_lock;

u32 ring_datasize; /* < ring_size */
@@ -154,6 +156,16 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}

static inline u32 hv_get_avail_to_write_percent(
const struct hv_ring_buffer_info *rbi)
{
u32 avail_write = hv_get_bytes_to_write(rbi);

return reciprocal_divide(
(avail_write << 3) + (avail_write << 1),
rbi->ring_size_div10_reciprocal);
}
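The shifts deserve a gloss (an editor's note; the setup line below is an assumption, not shown in this hunk): (avail_write << 3) + (avail_write << 1) is avail_write * 10, and dividing by a cached reciprocal of ring_size / 10 yields avail_write * 100 / ring_size, i.e. the free space as a percentage, without a runtime division. The reciprocal would be primed once, along the lines of:

/* assumed one-time setup in the ring-buffer init path */
rbi->ring_size_div10_reciprocal = reciprocal_value(rbi->ring_size / 10);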

/*
 * VMBUS version is 32 bit entity broken up into
 * two 16 bit quantities: major_number. minor_number.
@@ -163,6 +175,7 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
 * 2 . 4 (Windows 8)
 * 3 . 0 (Windows 8 R2)
 * 4 . 0 (Windows 10)
 * 5 . 0 (Newer Windows 10)
 */

#define VERSION_WS2008 ((0 << 16) | (13))
@@ -170,10 +183,11 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
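Decoding follows directly from the packing described above (an editor's sketch):

u16 major = VERSION_WIN10_V5 >> 16; /* 5 */
u16 minor = VERSION_WIN10_V5 & 0xffff; /* 0 */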

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10
#define VERSION_CURRENT VERSION_WIN10_V5

/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -570,7 +584,14 @@ struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
u32 target_vcpu; /* The VCPU the host should respond to */
u64 interrupt_page;
union {
u64 interrupt_page;
struct {
u8 msg_sint;
u8 padding1[3];
u32 padding2;
};
};
u64 monitor_page1;
u64 monitor_page2;
} __packed;
@@ -585,6 +606,19 @@ struct vmbus_channel_tl_connect_request {
struct vmbus_channel_version_response {
struct vmbus_channel_message_header header;
u8 version_supported;

u8 connection_state;
u16 padding;

/*
 * On new hosts that support VMBus protocol 5.0, we must use
 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
 * and for subsequent messages, we must use the Message Connection ID
 * field in the host-returned Version Response Message.
 *
 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
 */
u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {

@@ -1,38 +0,0 @@
/*
 * Header file for I2C support on PNX010x/4008.
 *
 * Author: Dennis Kovalev <dkovalev@ru.mvista.com>
 *
 * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#ifndef __I2C_PNX_H__
#define __I2C_PNX_H__

struct platform_device;
struct clk;

struct i2c_pnx_mif {
int ret; /* Return value */
int mode; /* Interface mode */
struct completion complete; /* I/O completion */
struct timer_list timer; /* Timeout */
u8 * buf; /* Data buffer */
int len; /* Length of data buffer */
int order; /* RX Bytes to order via TX */
};

struct i2c_pnx_algo_data {
void __iomem *ioaddr;
struct i2c_pnx_mif mif;
int last;
struct clk *clk;
struct i2c_adapter adapter;
int irq;
u32 timeout;
};

#endif /* __I2C_PNX_H__ */

@@ -394,7 +394,6 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
 * @addr: stored in i2c_client.addr
 * @dev_name: Overrides the default <busnr>-<addr> dev_name if set
 * @platform_data: stored in i2c_client.dev.platform_data
 * @archdata: copied into i2c_client.dev.archdata
 * @of_node: pointer to OpenFirmware device node
 * @fwnode: device node supplied by the platform firmware
 * @properties: additional device properties for the device
@@ -419,7 +418,6 @@ struct i2c_board_info {
unsigned short addr;
const char *dev_name;
void *platform_data;
struct dev_archdata *archdata;
struct device_node *of_node;
struct fwnode_handle *fwnode;
const struct property_entry *properties;
@@ -903,6 +901,9 @@ extern const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
struct i2c_client *client);

int of_i2c_get_board_info(struct device *dev, struct device_node *node,
struct i2c_board_info *info);

#else

static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -927,6 +928,13 @@ static inline const struct of_device_id
return NULL;
}

static inline int of_i2c_get_board_info(struct device *dev,
struct device_node *node,
struct i2c_board_info *info)
{
return -ENOTSUPP;
}

#endif /* CONFIG_OF */

#if IS_ENABLED(CONFIG_ACPI)

@@ -961,7 +961,7 @@ __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
typedef struct {
const char *name;
umode_t mode;
const struct file_operations *proc_fops;
int (*show)(struct seq_file *, void *);
} ide_proc_entry_t;

void proc_ide_create(void);
@@ -973,8 +973,8 @@ void ide_proc_unregister_port(ide_hwif_t *);
void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);

extern const struct file_operations ide_capacity_proc_fops;
extern const struct file_operations ide_geometry_proc_fops;
int ide_capacity_proc_show(struct seq_file *m, void *v);
int ide_geometry_proc_show(struct seq_file *m, void *v);
#else
static inline void proc_ide_create(void) { ; }
static inline void proc_ide_destroy(void) { ; }
@@ -1508,8 +1508,6 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
hwif->hwif_data = data;
}

extern void ide_toggle_bounce(ide_drive_t *drive, int on);

u64 ide_get_lba_addr(struct ide_cmd *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);

@@ -50,6 +50,7 @@ struct br_ip_list {
#define BR_VLAN_TUNNEL BIT(13)
#define BR_BCAST_FLOOD BIT(14)
#define BR_NEIGH_SUPPRESS BIT(15)
#define BR_ISOLATED BIT(16)

#define BR_DEFAULT_AGEING_TIME (300 * HZ)

@@ -93,11 +94,39 @@ static inline bool br_multicast_router(const struct net_device *dev)

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
bool br_vlan_enabled(const struct net_device *dev);
int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
return false;
}

static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
return -1;
}

static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
return -1;
}
#endif

#if IS_ENABLED(CONFIG_BRIDGE)
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid)
{
return NULL;
}
#endif

#endif

@@ -21,7 +21,7 @@ struct macvlan_dev {
|
||||
struct hlist_node hlist;
|
||||
struct macvlan_port *port;
|
||||
struct net_device *lowerdev;
|
||||
void *fwd_priv;
|
||||
void *accel_priv;
|
||||
struct vlan_pcpu_stats __percpu *pcpu_stats;
|
||||
|
||||
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
|
||||
@@ -61,10 +61,6 @@ extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
|
||||
struct nlattr *tb[], struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack);
|
||||
|
||||
extern void macvlan_count_rx(const struct macvlan_dev *vlan,
|
||||
unsigned int len, bool success,
|
||||
bool multicast);
|
||||
|
||||
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
|
||||
|
||||
extern int macvlan_link_register(struct rtnl_link_ops *ops);
|
||||
@@ -86,4 +82,27 @@ macvlan_dev_real_dev(const struct net_device *dev)
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void *macvlan_accel_priv(struct net_device *dev)
|
||||
{
|
||||
struct macvlan_dev *macvlan = netdev_priv(dev);
|
||||
|
||||
return macvlan->accel_priv;
|
||||
}
|
||||
|
||||
static inline bool macvlan_supports_dest_filter(struct net_device *dev)
|
||||
{
|
||||
struct macvlan_dev *macvlan = netdev_priv(dev);
|
||||
|
||||
return macvlan->mode == MACVLAN_MODE_PRIVATE ||
|
||||
macvlan->mode == MACVLAN_MODE_VEPA ||
|
||||
macvlan->mode == MACVLAN_MODE_BRIDGE;
|
||||
}
|
||||
|
||||
static inline int macvlan_release_l2fw_offload(struct net_device *dev)
|
||||
{
|
||||
struct macvlan_dev *macvlan = netdev_priv(dev);
|
||||
|
||||
macvlan->accel_priv = NULL;
|
||||
return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
|
||||
}
|
||||
#endif /* _LINUX_IF_MACVLAN_H */
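
The accel_priv field and helpers above replace fwd_priv for L2 forwarding offload. A sketch of how a lowerdev driver might gate the offload on the new mode check (the mydrv_* names are hypothetical):

static void *mydrv_fwd_add(struct net_device *lowerdev,
			   struct net_device *macvlan_dev)
{
	if (!macvlan_supports_dest_filter(macvlan_dev))
		return ERR_PTR(-EOPNOTSUPP);

	/* whatever is returned here is later retrieved via
	 * macvlan_accel_priv(macvlan_dev) */
	return mydrv_add_station(lowerdev, macvlan_dev->dev_addr);
}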

@@ -22,7 +22,7 @@
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
bool tun_is_xdp_buff(void *ptr);
bool tun_is_xdp_frame(void *ptr);
void *tun_xdp_to_ptr(void *ptr);
void *tun_ptr_to_xdp(void *ptr);
void tun_ptr_free(void *ptr);
@@ -39,7 +39,7 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
	return ERR_PTR(-EINVAL);
}
static inline bool tun_is_xdp_buff(void *ptr)
static inline bool tun_is_xdp_frame(void *ptr)
{
	return false;
}
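
With the rename from tun_is_xdp_buff() to tun_is_xdp_frame(), a consumer draining the tun tx ring might look like this sketch (it assumes tun_ptr_to_xdp() unpacks a struct xdp_frame pointer, which the void * signature above leaves implicit):

static void drain_tx_ring(struct ptr_ring *ring)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(ring)) != NULL) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *frame = tun_ptr_to_xdp(ptr);

			/* ... process the XDP frame ... */
			tun_ptr_free(ptr);
		} else {
			kfree_skb(ptr);	/* plain sk_buff entry */
		}
	}
}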

@@ -331,7 +331,7 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head failes.
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
@@ -379,7 +379,7 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns error if skb_cow_head failes.
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
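
A sketch of a caller honouring the documented error contract; the full parameter list of __vlan_insert_inner_tag() is only partially visible in this hunk, so the signature used here is an assumption:

static int push_tag(struct sk_buff *skb, u16 tci, unsigned int mac_len)
{
	int err;

	/* assumed signature: (skb, vlan_proto, vlan_tci, mac_len) */
	err = __vlan_insert_inner_tag(skb, htons(ETH_P_8021Q), tci, mac_len);
	if (err)
		return err;	/* skb_cow_head failed */

	/* skb->protocol is untouched, so this is safe on the receive path */
	return 0;
}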

@@ -127,7 +127,7 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);

#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
	_storagebits, _shift, _extend_name, _type) \
	_storagebits, _shift, _extend_name, _type, _mask_all) \
	{ \
		.type = (_type), \
		.differential = (_channel2 == -1 ? 0 : 1), \
@@ -139,7 +139,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
			BIT(IIO_CHAN_INFO_OFFSET), \
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
		.info_mask_shared_by_all = _mask_all, \
		.scan_index = (_si), \
		.scan_type = { \
			.sign = 'u', \
@@ -153,25 +153,35 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
	_storagebits, _shift) \
	__AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
		_storagebits, _shift, NULL, IIO_VOLTAGE)
		_storagebits, _shift, NULL, IIO_VOLTAGE, \
		BIT(IIO_CHAN_INFO_SAMP_FREQ))

#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
	_storagebits, _shift) \
	__AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
		_storagebits, _shift, "shorted", IIO_VOLTAGE)
		_storagebits, _shift, "shorted", IIO_VOLTAGE, \
		BIT(IIO_CHAN_INFO_SAMP_FREQ))

#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
	_storagebits, _shift) \
	__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
		_storagebits, _shift, NULL, IIO_VOLTAGE)
		_storagebits, _shift, NULL, IIO_VOLTAGE, \
		BIT(IIO_CHAN_INFO_SAMP_FREQ))

#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \
	_storagebits, _shift) \
	__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
		_storagebits, _shift, NULL, IIO_VOLTAGE, 0)

#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
	__AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
		_storagebits, _shift, NULL, IIO_TEMP)
		_storagebits, _shift, NULL, IIO_TEMP, \
		BIT(IIO_CHAN_INFO_SAMP_FREQ))

#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
	_shift) \
	__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
		_storagebits, _shift, "supply", IIO_VOLTAGE)
		_storagebits, _shift, "supply", IIO_VOLTAGE, \
		BIT(IIO_CHAN_INFO_SAMP_FREQ))

#endif
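
The added _mask_all parameter lets a driver whose sampling rate is fixed declare channels through the new AD_SD_CHANNEL_NO_SAMP_FREQ, while other channels keep advertising IIO_CHAN_INFO_SAMP_FREQ. A sketch with made-up scan indices and register addresses:

static const struct iio_chan_spec mydrv_channels[] = {
	/* exposes in_voltage_sampling_frequency as before */
	AD_SD_CHANNEL(0, 0, 0x00, 24, 32, 0),
	/* fixed-rate channel: no sampling-frequency attribute */
	AD_SD_CHANNEL_NO_SAMP_FREQ(1, 1, 0x01, 24, 32, 0),
};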

@@ -9,6 +9,8 @@
#ifndef STM32_DFSDM_ADC_H
#define STM32_DFSDM_ADC_H

#include <linux/iio/iio.h>

int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
			    int (*cb)(const void *data, size_t size,
				      void *private),

include/linux/iio/common/cros_ec_sensors_core.h (new file, 180 lines)
@@ -0,0 +1,180 @@
/*
 * ChromeOS EC sensor hub
 *
 * Copyright (C) 2016 Google, Inc
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __CROS_EC_SENSORS_CORE_H
#define __CROS_EC_SENSORS_CORE_H

#include <linux/iio/iio.h>
#include <linux/irqreturn.h>
#include <linux/mfd/cros_ec.h>

enum {
	CROS_EC_SENSOR_X,
	CROS_EC_SENSOR_Y,
	CROS_EC_SENSOR_Z,
	CROS_EC_SENSOR_MAX_AXIS,
};

/* EC returns sensor values using signed 16 bit registers */
#define CROS_EC_SENSOR_BITS 16

/*
 * 4 16 bit channels are allowed.
 * Good enough for current sensors, they use up to 3 16 bit vectors.
 */
#define CROS_EC_SAMPLE_SIZE  (sizeof(s64) * 2)

/* Minimum sampling period to use when device is suspending */
#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000  /* 1 second */

/**
 * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
 * @ec:				cros EC device structure
 * @cmd_lock:			lock used to prevent simultaneous access to the
 *				commands.
 * @msg:			cros EC command structure
 * @param:			motion sensor parameters structure
 * @resp:			motion sensor response structure
 * @type:			type of motion sensor
 * @loc:			location where the motion sensor is placed
 * @calib:			calibration parameters. Note that trigger
 *				captured data will always provide the calibrated
 *				data
 * @samples:			static array to hold data from a single capture.
 *				For each channel we need 2 bytes, except for
 *				the timestamp. The timestamp is always last and
 *				is always 8-byte aligned.
 * @read_ec_sensors_data:	function used for accessing sensors values
 * @curr_sampl_freq:		current sampling period
 */
struct cros_ec_sensors_core_state {
	struct cros_ec_device *ec;
	struct mutex cmd_lock;

	struct cros_ec_command *msg;
	struct ec_params_motion_sense param;
	struct ec_response_motion_sense *resp;

	enum motionsensor_type type;
	enum motionsensor_location loc;

	s16 calib[CROS_EC_SENSOR_MAX_AXIS];

	u8 samples[CROS_EC_SAMPLE_SIZE];

	int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
				    unsigned long scan_mask, s16 *data);

	int curr_sampl_freq;
};

/**
 * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
 * @indio_dev:	pointer to IIO device
 * @scan_mask:	bitmap of the sensor indices to scan
 * @data:	location to store data
 *
 * This is the safe function for reading the EC data. It guarantees that the
 * data sampled was not modified by the EC while being read.
 *
 * Return: 0 on success, -errno on failure.
 */
int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
			     s16 *data);

/**
 * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
 * @indio_dev:	pointer to IIO device
 * @scan_mask:	bitmap of the sensor indices to scan
 * @data:	location to store data
 *
 * Return: 0 on success, -errno on failure.
 */
int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
			     s16 *data);

struct platform_device;
/**
 * cros_ec_sensors_core_init() - basic initialization of the core structure
 * @pdev:		platform device created for the sensors
 * @indio_dev:		iio device structure of the device
 * @physical_device:	true if the device refers to a physical device
 *
 * Return: 0 on success, -errno on failure.
 */
int cros_ec_sensors_core_init(struct platform_device *pdev,
			      struct iio_dev *indio_dev, bool physical_device);

/**
 * cros_ec_sensors_capture() - the trigger handler function
 * @irq:	the interrupt number.
 * @p:		a pointer to the poll function.
 *
 * On a trigger event occurring, if the pollfunc is attached then this
 * handler is called as a threaded interrupt (and hence may sleep). It
 * is responsible for grabbing data from the device and pushing it into
 * the associated buffer.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t cros_ec_sensors_capture(int irq, void *p);

/**
 * cros_ec_motion_send_host_cmd() - send motion sense host command
 * @st:		pointer to state information for device
 * @opt_length:	optional length to reduce the response size, useful on the data
 *		path. Otherwise, the maximal allowed response size is used
 *
 * When called, the sub-command is assumed to be set in param->cmd.
 *
 * Return: 0 on success, -errno on failure.
 */
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
				 u16 opt_length);

/**
 * cros_ec_sensors_core_read() - function to request a value from the sensor
 * @st:		pointer to state information for device
 * @chan:	channel specification structure table
 * @val:	will contain one element making up the returned value
 * @val2:	will contain another element making up the returned value
 * @mask:	specifies which values to be requested
 *
 * Return:	the type of value returned by the device
 */
int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2, long mask);

/**
 * cros_ec_sensors_core_write() - function to write a value to the sensor
 * @st:		pointer to state information for device
 * @chan:	channel specification structure table
 * @val:	first part of value to write
 * @val2:	second part of value to write
 * @mask:	specifies which values to write
 *
 * Return:	the type of value returned by the device
 */
int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
			       struct iio_chan_spec const *chan,
			       int val, int val2, long mask);

extern const struct dev_pm_ops cros_ec_sensors_pm_ops;

/* List of extended channel specification for all sensors */
extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];

#endif /* __CROS_EC_SENSORS_CORE_H */
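
A sketch of a sensor driver probe built on this core, following the pattern the kernel-doc above suggests (the mydrv names are hypothetical and error paths are trimmed):

static int mydrv_probe(struct platform_device *pdev)
{
	struct cros_ec_sensors_core_state *st;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	/* physical_device = true: this IIO device maps to real hardware */
	ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
	if (ret)
		return ret;

	st = iio_priv(indio_dev);
	st->read_ec_sensors_data = cros_ec_sensors_read_cmd;

	return devm_iio_device_register(&pdev->dev, indio_dev);
}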

@@ -183,18 +183,18 @@ struct iio_event_spec {
 * @address:		Driver specific identifier.
 * @scan_index:		Monotonic index to give ordering in scans when read
 *			from a buffer.
 * @scan_type:		sign:		's' or 'u' to specify signed or unsigned
 *			realbits:	Number of valid bits of data
 *			storagebits:	Realbits + padding
 *			shift:		Shift right by this before masking out
 *					realbits.
 *			repeat:		Number of times real/storage bits
 *					repeats. When the repeat element is
 *					more than 1, then the type element in
 *					sysfs will show a repeat value.
 *					Otherwise, the number of repetitions is
 *					omitted.
 *			endianness:	little or big endian
 * @scan_type:		struct describing the scan type
 * @scan_type.sign:		's' or 'u' to specify signed or unsigned
 * @scan_type.realbits:	Number of valid bits of data
 * @scan_type.storagebits:	Realbits + padding
 * @scan_type.shift:		Shift right by this before masking out
 *				realbits.
 * @scan_type.repeat:		Number of times real/storage bits repeats.
 *				When the repeat element is more than 1, then
 *				the type element in sysfs will show a repeat
 *				value. Otherwise, the number of repetitions
 *				is omitted.
 * @scan_type.endianness:	little or big endian
 * @info_mask_separate:	What information is to be exported that is specific to
 *			this channel.
 * @info_mask_separate_available: What availability information is to be
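
The reworked kernel-doc now describes each scan_type sub-field individually. An illustrative channel definition exercising them (values are made up):

static const struct iio_chan_spec mydrv_chan = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.scan_index = 0,
	.scan_type = {
		.sign = 's',		/* signed samples */
		.realbits = 12,		/* 12 valid bits ... */
		.storagebits = 16,	/* ... stored in 16 bits */
		.shift = 4,		/* right-shift before masking */
		.endianness = IIO_BE,
	},
};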

@@ -139,6 +139,7 @@ struct in_ifaddr {
	__be32			ifa_local;
	__be32			ifa_address;
	__be32			ifa_mask;
	__u32			ifa_rt_priority;
	__be32			ifa_broadcast;
	unsigned char		ifa_scope;
	unsigned char		ifa_prefixlen;

@@ -121,7 +121,6 @@
#define ecap_srs(e)		((e >> 31) & 0x1)
#define ecap_ers(e)		((e >> 30) & 0x1)
#define ecap_prs(e)		((e >> 29) & 0x1)
#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
#define ecap_dis(e)		((e >> 27) & 0x1)
#define ecap_nest(e)		((e >> 26) & 0x1)
#define ecap_mts(e)		((e >> 25) & 0x1)

@@ -432,11 +432,18 @@ extern bool force_irqthreads;
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x)	(local_softirq_pending() = (x))
#define or_softirq_pending(x)	(local_softirq_pending() |= (x))
#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */
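
With this change an architecture can point the generic accessors at its own per-CPU softirq-pending word simply by defining local_softirq_pending_ref before this fallback is reached; a hypothetical override (the variable name is made up):

/* in an arch header included before <linux/interrupt.h> */
DECLARE_PER_CPU(u32, arch_softirq_pending);
#define local_softirq_pending_ref arch_softirq_pending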

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have

@@ -4,6 +4,7 @@

#include <linux/types.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iov_iter;
@@ -18,6 +19,7 @@ struct vm_fault;
#define IOMAP_DELALLOC	0x02	/* delayed allocation blocks */
#define IOMAP_MAPPED	0x03	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN	0x04	/* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE	0x05	/* data inline in the inode */

/*
 * Flags for all iomap mappings:
@@ -26,15 +28,19 @@ struct vm_fault;
 * written data and requires fdatasync to commit them to persistent storage.
 */
#define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
#define IOMAP_F_BOUNDARY	0x02	/* mapping ends at metadata boundary */
#define IOMAP_F_DIRTY		0x04	/* uncommitted metadata */
#define IOMAP_F_DIRTY		0x02	/* uncommitted metadata */

/*
 * Flags that only need to be reported for IOMAP_REPORT requests:
 */
#define IOMAP_F_MERGED		0x10	/* contains multiple blocks/extents */
#define IOMAP_F_SHARED		0x20	/* block shared with another file */
#define IOMAP_F_DATA_INLINE	0x40	/* data inline in the inode */

/*
 * Flags from 0x1000 up are for file system specific usage:
 */
#define IOMAP_F_PRIVATE		0x1000


/*
 * Magic value for addr:
@@ -59,7 +65,7 @@ struct iomap {
#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT		(1 << 4) /* direct I/O */
#define IOMAP_NOWAIT		(1 << 5) /* Don't wait for writeback */
#define IOMAP_NOWAIT		(1 << 5) /* do not block */

struct iomap_ops {
	/*
@@ -95,6 +101,8 @@ loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops);

/*
 * Flags for direct I/O ->end_io:
@@ -106,4 +114,15 @@ typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io);

#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;

int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops);
#else
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops)	(-EIO)
#endif /* CONFIG_SWAP */

#endif /* LINUX_IOMAP_H */
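
A filesystem can back its ->swap_activate address-space operation with the new helper; a minimal sketch, assuming a hypothetical myfs and its myfs_iomap_ops:

static int myfs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	/* walks the file's extents via the iomap ops and validates
	 * that they are usable as swap space */
	return iomap_swapfile_activate(sis, swap_file, span,
				       &myfs_iomap_ops);
}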

@@ -1,53 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IOMMU_COMMON_H
#define _LINUX_IOMMU_COMMON_H

#include <linux/spinlock_types.h>
#include <linux/device.h>
#include <asm/page.h>

#define IOMMU_POOL_HASHBITS	4
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)
#define IOMMU_ERROR_CODE	(~(unsigned long) 0)

struct iommu_pool {
	unsigned long	start;
	unsigned long	end;
	unsigned long	hint;
	spinlock_t	lock;
};

struct iommu_map_table {
	unsigned long		table_map_base;
	unsigned long		table_shift;
	unsigned long		nr_pools;
	void			(*lazy_flush)(struct iommu_map_table *);
	unsigned long		poolsize;
	struct iommu_pool	pools[IOMMU_NR_POOLS];
	u32			flags;
#define IOMMU_HAS_LARGE_POOL	0x00000001
#define IOMMU_NO_SPAN_BOUND	0x00000002
#define IOMMU_NEED_FLUSH	0x00000004
	struct iommu_pool	large_pool;
	unsigned long		*map;
};

extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
				unsigned long num_entries,
				u32 table_shift,
				void (*lazy_flush)(struct iommu_map_table *),
				bool large_pool, u32 npools,
				bool skip_span_boundary_check);

extern unsigned long iommu_tbl_range_alloc(struct device *dev,
					   struct iommu_map_table *iommu,
					   unsigned long npages,
					   unsigned long *handle,
					   unsigned long mask,
					   unsigned int align_order);

extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
				 u64 dma_addr, unsigned long npages,
				 unsigned long entry);

#endif
Some files were not shown because too many files have changed in this diff.