Merge tag 'for-linus-timers-conversion-final-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into timers/urgent
Pull the last batch of manual timer conversions from Kees Cook:

 - final batch of "non trivial" timer conversions (multi-tree
   dependencies, things Coccinelle couldn't handle, etc).

 - treewide conversions via Coccinelle, in 4 steps:

   - DEFINE_TIMER() functions converted to struct timer_list * argument

   - init_timer() -> setup_timer()

   - setup_timer() -> timer_setup()

   - setup_timer() -> timer_setup() (with a single embedded structure)

 - deprecated timer API removals (init_timer(), setup_*timer())

 - finalization of new API (remove global casts)
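For reference, a minimal sketch of the conversion pattern this series applies
treewide; struct foo, its fields, and the function names are hypothetical and
not taken from any hunk below:

	#include <linux/timer.h>

	struct foo {
		struct timer_list timer;
		int count;
	};

	/* Old style: the callback took an unsigned long that was cast from
	 * a data pointer when the timer was set up, e.g.
	 *	setup_timer(&foo->timer, foo_timeout, (unsigned long)foo);
	 */

	/* New style: the callback receives the timer_list pointer and
	 * recovers the containing structure with from_timer(). */
	static void foo_timeout(struct timer_list *t)
	{
		struct foo *foo = from_timer(foo, t, timer);

		foo->count++;
	}

	static void foo_init(struct foo *foo)
	{
		timer_setup(&foo->timer, foo_timeout, 0);
	}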
include/linux/acct.h

@@ -20,9 +20,6 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
struct vfsmount;
struct super_block;
struct pacct_struct;
struct pid_namespace;
extern int acct_parm[]; /* for sysctl */
extern void acct_collect(long exitcode, int group_dead);
include/linux/acpi.h

@@ -864,21 +864,16 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
int acpi_dev_runtime_suspend(struct device *dev);
int acpi_dev_runtime_resume(struct device *dev);
int acpi_dev_suspend(struct device *dev, bool wakeup);
int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
#else
static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
return NULL;
}
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return -ENODEV;

@@ -887,22 +882,30 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)

#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
int acpi_dev_suspend_late(struct device *dev);
int acpi_dev_resume_early(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
void acpi_subsys_complete(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_resume_noirq(struct device *dev);
int acpi_subsys_resume_early(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_freeze_late(struct device *dev);
int acpi_subsys_freeze_noirq(struct device *dev);
int acpi_subsys_thaw_noirq(struct device *dev);
#else
static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
#endif

#ifdef CONFIG_ACPI

@@ -1254,4 +1257,13 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
#endif

#ifdef CONFIG_ACPI_LPIT
int lpit_read_residency_count_address(u64 *address);
#else
static inline int lpit_read_residency_count_address(u64 *address)
{
return -EINVAL;
}
#endif

#endif /*_LINUX_ACPI_H*/
include/linux/acpi_iort.h

@@ -49,8 +49,8 @@ static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
u64 *size) { }
static inline
const struct iommu_ops *iort_iommu_configure(struct device *dev)
static inline const struct iommu_ops *iort_iommu_configure(
struct device *dev)
{ return NULL; }
#endif
include/linux/arch_topology.h

@@ -6,15 +6,30 @@
#define _LINUX_ARCH_TOPOLOGY_H_

#include <linux/types.h>
#include <linux/percpu.h>

void topology_normalize_cpu_scale(void);

struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);

DECLARE_PER_CPU(unsigned long, cpu_scale);

struct sched_domain;
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
static inline
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);

DECLARE_PER_CPU(unsigned long, freq_scale);

static inline
unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
{
return per_cpu(freq_scale, cpu);
}

#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
include/linux/audit.h

@@ -149,12 +149,6 @@ extern void audit_log_key(struct audit_buffer *ab,
extern void audit_log_link_denied(const char *operation,
const struct path *link);
extern void audit_log_lost(const char *message);
#ifdef CONFIG_SECURITY
extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
#else
static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
{ }
#endif

extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab,

@@ -203,8 +197,6 @@ static inline void audit_log_key(struct audit_buffer *ab, char *key)
static inline void audit_log_link_denied(const char *string,
const struct path *link)
{ }
static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;

@@ -356,6 +348,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
extern void __audit_log_kern_module(char *name);
extern void __audit_fanotify(unsigned int response);

static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{

@@ -452,6 +445,12 @@ static inline void audit_log_kern_module(char *name)
__audit_log_kern_module(name);
}

static inline void audit_fanotify(unsigned int response)
{
if (!audit_dummy_context())
__audit_fanotify(response);
}

extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */

@@ -568,6 +567,9 @@ static inline void audit_log_kern_module(char *name)
{
}

static inline void audit_fanotify(unsigned int response)
{ }

static inline void audit_ptrace(struct task_struct *t)
{ }
#define audit_n_rules 0
include/linux/avf/virtchnl.h

@@ -135,6 +135,7 @@ enum virtchnl_ops {
VIRTCHNL_OP_SET_RSS_HENA = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
};

/* This macro is used to generate a compilation error if a structure

@@ -235,6 +236,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000

@@ -325,6 +327,21 @@ struct virtchnl_vsi_queue_config_info {
struct virtchnl_queue_pair_info qpair[1];
};

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated. This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support. If the request is successful, PF will
 * then reset the VF to institute required changes.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);

/* VIRTCHNL_OP_CONFIG_IRQ_MAP

@@ -691,6 +708,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
include/linux/backing-dev-defs.h

@@ -25,6 +25,7 @@ enum wb_state {
WB_shutting_down,	/* wb_shutdown() in progress */
WB_writeback_running,	/* Writeback is in progress */
WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
WB_start_all,		/* nr_pages == 0 (all) work pending */
};

enum wb_congested_state {

@@ -44,6 +45,28 @@ enum wb_stat_item {

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

/*
 * why some writeback work was initiated
 */
enum wb_reason {
WB_REASON_BACKGROUND,
WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,
WB_REASON_FREE_MORE_MEM,
WB_REASON_FS_FREE_SPACE,
/*
 * There is no bdi forker thread any more and works are done
 * by emergency worker, however, this is TPs userland visible
 * and we'll be exposing exactly the same information,
 * so it has a mismatch name.
 */
WB_REASON_FORKER_THREAD,

WB_REASON_MAX,
};

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg. Those
 * wb's can operate mostly independently but should share the congested

@@ -116,6 +139,7 @@ struct bdi_writeback {

struct fprop_local_percpu completions;
int dirty_exceeded;
enum wb_reason start_all_reason;

spinlock_t work_lock;	/* protects work_list & dwork scheduling */
struct list_head work_list;
include/linux/backing-dev.h

@@ -39,8 +39,6 @@ static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

@@ -95,7 +93,7 @@ extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
return nr_cpu_ids * WB_STAT_BATCH;

@@ -124,6 +122,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 * inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002

@@ -131,6 +131,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_SYNCHRONOUS_IO 0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

@@ -175,8 +176,11 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
include/linux/balloon_compaction.h

@@ -50,6 +50,7 @@
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>

/*
 * Balloon device information descriptor.

@@ -67,7 +68,9 @@ struct balloon_dev_info {
struct inode *inode;
};

extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
extern struct page *balloon_page_alloc(void);
extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
struct page *page);
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);

static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)

@@ -193,4 +196,34 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
}

#endif /* CONFIG_BALLOON_COMPACTION */

/*
 * balloon_page_push - insert a page into a page list.
 * @head : pointer to list
 * @page : page to be added
 *
 * Caller must ensure the page is private and protect the list.
 */
static inline void balloon_page_push(struct list_head *pages, struct page *page)
{
list_add(&page->lru, pages);
}

/*
 * balloon_page_pop - remove a page from a page list.
 * @head : pointer to list
 * @page : page to be added
 *
 * Caller must ensure the page is private and protect the list.
 */
static inline struct page *balloon_page_pop(struct list_head *pages)
{
struct page *page = list_first_entry_or_null(pages, struct page, lru);

if (!page)
return NULL;

list_del(&page->lru);
return page;
}
#endif /* _LINUX_BALLOON_COMPACTION_H */
include/linux/bio.h

@@ -128,18 +128,6 @@ static inline void *bio_data(struct bio *bio)
 */
#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter) \
(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)

/*
 * merge helpers etc
 */

@@ -462,7 +450,7 @@ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
const struct iov_iter *, gfp_t);
struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);

@@ -494,7 +482,7 @@ extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *,
const struct iov_iter *,
struct iov_iter *,
gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);

@@ -522,13 +510,11 @@ do { \

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
struct bio *src) { }

@@ -575,17 +561,6 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
unsigned long *flags)
{
return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
include/linux/bitfield.h

@@ -15,7 +15,7 @@
#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/bug.h>
#include <linux/build_bug.h>

/*
 * Bitfield access macros
include/linux/bitops.h

@@ -228,6 +228,30 @@ static inline unsigned long __ffs64(u64 word)
return __ffs((unsigned long)word);
}

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
set_bit(nr, addr);
else
clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
__set_bit(nr, addr);
else
__clear_bit(nr, addr);
}

#ifdef __KERNEL__

#ifndef set_mask_bits
include/linux/blk-cgroup.h

@@ -20,6 +20,7 @@
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)

@@ -224,22 +225,16 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
struct cgroup_subsys_state *css;

if (bio && bio->bi_css)
return css_to_blkcg(bio->bi_css);
return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
return task_get_css(task, io_cgrp_id);
css = kthread_blkcg();
if (css)
return css_to_blkcg(css);
return css_to_blkcg(task_css(current, io_cgrp_id));
}

/**

@@ -736,12 +731,6 @@ struct blkcg_policy {

#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
include/linux/blk-mq.h

@@ -31,10 +31,12 @@ struct blk_mq_hw_ctx {

struct sbitmap ctx_map;

struct blk_mq_ctx *dispatch_from;

struct blk_mq_ctx **ctxs;
unsigned int nr_ctx;

wait_queue_entry_t dispatch_wait;
wait_queue_entry_t dispatch_wait;
atomic_t wait_index;

struct blk_mq_tags *tags;

@@ -91,6 +93,8 @@ struct blk_mq_queue_data {

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
const struct blk_mq_queue_data *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);

@@ -112,6 +116,15 @@ struct blk_mq_ops {
 */
queue_rq_fn *queue_rq;

/*
 * Reserve budget before queue request, once .queue_rq is
 * run, it is driver's responsibility to release the
 * reserved budget. Also we have to handle failure case
 * of .get_budget for avoiding I/O deadlock.
 */
get_budget_fn *get_budget;
put_budget_fn *put_budget;

/*
 * Called on request timeout
 */

@@ -169,8 +182,7 @@ enum {
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
BLK_MQ_S_TAG_WAITING = 3,
BLK_MQ_S_START_ON_RUN = 4,
BLK_MQ_S_START_ON_RUN = 3,

BLK_MQ_MAX_DEPTH = 10240,

@@ -198,15 +210,21 @@ void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
/* return when out of requests */
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
/* allocate from reserved pool */
BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
/* allocate internal/sched tag */
BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2),
/* set RQF_PREEMPT */
BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
unsigned int flags);
blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
unsigned int op, unsigned int flags, unsigned int hctx_idx);
unsigned int op, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {

@@ -249,7 +267,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,

@@ -260,8 +278,8 @@ void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
int (reinit_request)(void *, struct request *));
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
int (reinit_request)(void *, struct request *));

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
include/linux/blk_types.h

@@ -163,6 +163,8 @@ struct bio {
 */
#define BIO_RESET_BITS BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.

@@ -225,11 +227,14 @@ enum req_flag_bits {
__REQ_PREFLUSH,		/* request for cache flush */
__REQ_RAHEAD,		/* read ahead, can fail anytime */
__REQ_BACKGROUND,	/* background IO */
__REQ_NOWAIT,		/* Don't wait if request will block */

/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP,		/* do not free blocks when zeroing */

__REQ_NOWAIT,		/* Don't wait if request will block */
/* for driver use */
__REQ_DRV,

__REQ_NR_BITS,		/* stops here */
};

@@ -246,9 +251,11 @@ enum req_flag_bits {
#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)

#define REQ_DRV (1ULL << __REQ_DRV)

#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

@@ -330,11 +337,10 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
}

struct blk_rq_stat {
s64 mean;
u64 mean;
u64 min;
u64 max;
s32 nr_samples;
s32 nr_batch;
u32 nr_samples;
u64 batch;
};
include/linux/blkdev.h

@@ -267,6 +267,7 @@ struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

@@ -409,6 +410,7 @@ struct request_queue {

request_fn_proc *request_fn;
make_request_fn *make_request_fn;
poll_q_fn *poll_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;

@@ -610,7 +612,6 @@ struct request_queue {
#define QUEUE_FLAG_NOMERGES 5	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 6	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 7	/* fake timeout */
#define QUEUE_FLAG_STACKABLE 8	/* supports request stacking */
#define QUEUE_FLAG_NONROT 9	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 10	/* do IO stats */

@@ -632,14 +633,13 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 26	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28	/* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY 29	/* only process REQ_PREEMPT requests */

#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))

@@ -723,8 +723,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))

@@ -736,6 +734,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q) \
test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)

extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{

@@ -923,24 +926,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...) \
__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...) \
do { \
no_printk(fmt, ##__VA_ARGS__); \
__vfs_msg(sb, "", " "); \
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request_flags(struct request_queue *,
unsigned int op,
blk_mq_req_flags_t flags);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
gfp_t gfp_mask);
extern void blk_requeue_request(struct request_queue *, struct request *);

@@ -964,7 +960,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);

@@ -991,7 +987,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{

@@ -1110,6 +1106,8 @@ extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *

@@ -1372,7 +1370,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
extern int blk_verify_command(unsigned char *cmd, fmode_t mode);

enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
include/linux/bootmem.h

@@ -161,6 +161,9 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)

/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr,
phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
phys_addr_t align, phys_addr_t min_addr,
phys_addr_t max_addr, int nid);

@@ -177,6 +180,14 @@ static inline void * __init memblock_virt_alloc(
NUMA_NO_NODE);
}

static inline void * __init memblock_virt_alloc_raw(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}

static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{

@@ -258,6 +269,14 @@ static inline void * __init memblock_virt_alloc(
return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
}

static inline void * __init memblock_virt_alloc_raw(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
}

static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{

@@ -310,6 +329,14 @@ static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
min_addr);
}

static inline void * __init memblock_virt_alloc_try_nid_raw(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
min_addr, max_addr);
}

static inline void * __init memblock_virt_alloc_try_nid_nopanic(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
include/linux/bpf-cgroup.h

@@ -15,27 +15,46 @@ struct bpf_sock_ops_kern;
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
/*
 * Store two sets of bpf_prog pointers, one for programs that are
 * pinned directly to this cgroup, and one for those that are effective
 * when this cgroup is accessed.
/* array of effective progs in this cgroup */
struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

/* attached progs to this cgroup and attach flags
 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
 * have either zero or one element
 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
 */
struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
bool disallow_override[MAX_BPF_ATTACH_TYPE];
struct list_head progs[MAX_BPF_ATTACH_TYPE];
u32 flags[MAX_BPF_ATTACH_TYPE];

/* temp storage for effective prog array used by prog_attach/detach */
struct bpf_prog_array __rcu *inactive;
};

void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
struct bpf_prog *prog, enum bpf_attach_type type,
bool overridable);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, bool overridable);
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,

@@ -48,6 +67,9 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \

@@ -93,17 +115,28 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
} \
__ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
access, \
BPF_CGROUP_DEVICE); \
\
__ret; \
})
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
struct cgroup *parent) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */
include/linux/bpf.h

@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>

struct perf_event;
struct bpf_prog;

@@ -56,6 +57,10 @@ struct bpf_map {
struct work_struct work;
atomic_t usercnt;
struct bpf_map *inner_map_meta;
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
#endif
};

/* function argument constraints */

@@ -137,6 +142,7 @@ enum bpf_reg_type {
PTR_TO_MAP_VALUE,	 /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
PTR_TO_STACK,		 /* reg == frame_pointer + offset */
PTR_TO_PACKET_META,	 /* skb->data - meta_len */
PTR_TO_PACKET,		 /* reg points to skb->data */
PTR_TO_PACKET_END,	 /* skb->data + headlen */
};

@@ -155,6 +161,11 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}

struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);

@@ -170,8 +181,16 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
};

struct bpf_dev_offload {
struct bpf_prog		*prog;
struct net_device	*netdev;
void			*dev_priv;
struct list_head	offloads;
bool			dev_state;
bool			verifier_running;
wait_queue_head_t	verifier_done;
};

struct bpf_prog_aux {

@@ -182,10 +201,16 @@ struct bpf_prog_aux {
u32 id;
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_verifier_ops *ops;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
#endif
struct bpf_dev_offload *offload;
union {
struct work_struct work;
struct rcu_head	rcu;

@@ -218,9 +243,6 @@ struct bpf_event_entry {
struct rcu_head rcu;
};

u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

@@ -237,19 +259,84 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array {
struct rcu_head rcu;
struct bpf_prog *progs[0];
};

struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
struct bpf_prog *old_prog);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
struct bpf_prog **_prog, *__prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
rcu_read_lock(); \
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
goto _out; \
_prog = _array->progs; \
while ((__prog = READ_ONCE(*_prog))) { \
_ret &= func(__prog, ctx); \
_prog++; \
} \
_out: \
rcu_read_unlock(); \
_ret; \
})

#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

#define BPF_PROG_TYPE(_id, _ops) \
extern const struct bpf_verifier_ops _ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
struct net_device *netdev);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);

@@ -269,11 +356,11 @@ void bpf_map_area_free(void *base);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map);
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);

@@ -292,6 +379,8 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent

@@ -316,6 +405,13 @@ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
struct xdp_buff;
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{

@@ -323,7 +419,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
attr->numa_node : NUMA_NO_NODE;
}

#else
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);

@@ -334,6 +430,14 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
{
return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
enum bpf_prog_type type,
struct net_device *netdev)
{
return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
int i)
{

@@ -368,7 +472,7 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname)
static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
return -EOPNOTSUPP;
}

@@ -386,8 +490,54 @@ static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
static inline void __dev_map_flush(struct bpf_map *map)
{
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
return NULL;
}

static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
struct xdp_buff *xdp,
struct net_device *dev_rx)
{
return 0;
}
#endif /* CONFIG_BPF_SYSCALL */

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return aux->offload;
}
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
{
return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return false;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
include/linux/bpf_types.h

@@ -2,22 +2,25 @@
/* internal file - do not include directly */

#ifdef CONFIG_NET
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#endif

BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)

@@ -42,4 +45,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
#ifdef CONFIG_STREAM_PARSER
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#endif
@@ -88,14 +88,19 @@ enum bpf_stack_slot_type {

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_verifier_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
	struct bpf_verifier_state *parent;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

/* linked list of verifier states used to prune search */
@@ -115,6 +120,21 @@ struct bpf_insn_aux_data {

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifer_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
{
	return log->len_used >= log->len_total - 1;
}

struct bpf_verifier_env;
struct bpf_ext_analyzer_ops {
	int (*insn_hook)(struct bpf_verifier_env *env,
@@ -126,22 +146,35 @@ struct bpf_ext_analyzer_ops {
 */
struct bpf_verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	struct bpf_verifier_state cur_state; /* current verifier state */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
	void *analyzer_priv; /* pointer to external analyzer's private data */
	const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */

	struct bpf_verifer_log log;
};

int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
		 void *priv);
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return env->cur_state->regs;
}

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
#else
static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	return -EOPNOTSUPP;
}
#endif

#endif /* _LINUX_BPF_VERIFIER_H */

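Editorial note: the switch from an embedded cur_state to a pointer is why the new cur_regs() accessor exists; verifier code is no longer meant to reach into env->cur_state.regs directly. A minimal sketch of a caller, assuming the usual env/insn variables from the verifier (the check itself is illustrative, not from this patch):

	static int check_example(struct bpf_verifier_env *env,
				 struct bpf_insn *insn)
	{
		struct bpf_reg_state *regs = cur_regs(env);

		/* operate on the current state's register file */
		if (regs[insn->dst_reg].type == NOT_INIT)
			return -EACCES;
		return 0;
	}
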
@@ -64,6 +64,7 @@
#define PHY_BRCM_EXT_IBND_TX_ENABLE	0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE	0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY	0x00008000
#define PHY_BRCM_EN_MASTER_MODE		0x00010000

/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x)	(((x) >> 8) & 0xff)

@@ -157,7 +157,7 @@ void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);

@@ -43,6 +43,8 @@ enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);

void generic_bug_clear_once(void);

#else	/* !CONFIG_GENERIC_BUG */

static inline enum bug_trap_type report_bug(unsigned long bug_addr,
@@ -51,6 +53,9 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
	return BUG_TRAP_TYPE_BUG;
}


static inline void generic_bug_clear_once(void) {}

#endif	/* CONFIG_GENERIC_BUG */

/*

@@ -171,4 +171,20 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
	*var = cpu_to_be64(be64_to_cpu(*var) + val);
}

static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
	int i;

	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
	int i;

	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
}

#endif /* _LINUX_BYTEORDER_GENERIC_H */

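A usage sketch for the new array converters: turning a working state kept in native-endian u32 words into a big-endian output buffer. The eight-word, SHA-256-style layout is only an example of the kind of caller this helper was added for:

	u32 state[8];		/* working state, CPU byte order */
	__be32 digest[8];	/* wire/storage format, big endian */

	cpu_to_be32_array(digest, state, ARRAY_SIZE(digest));
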
@@ -9,8 +9,6 @@
 * the Free Software Foundation
 */

#include <linux/kmemcheck.h>

#define C2PORT_NAME_LEN	32

struct device;
@@ -22,10 +20,8 @@ struct device;
/* Main struct */
struct c2port_ops;
struct c2port_device {
	kmemcheck_bitfield_begin(flags);
	unsigned int access:1;
	unsigned int flash_access:1;
	kmemcheck_bitfield_end(flags);

	int id;
	char name[C2PORT_NAME_LEN];

@@ -17,6 +17,7 @@
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>

@@ -255,6 +256,57 @@ struct css_set {
	struct rcu_head rcu_head;
};

/*
 * cgroup basic resource usage statistics. Accounting is done per-cpu in
 * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
 * reads.
 *
 * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
 * linked into the updated tree. On the following read, propagation only
 * considers and consumes the updated tree. This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently. The combination can
 * become very expensive. By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 */
struct cgroup_cpu_stat {
	/*
	 * ->sync protects all the current counters. These are the only
	 * fields which get updated in the hot path.
	 */
	struct u64_stats_sync sync;
	struct task_cputime cputime;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct task_cputime last_cputime;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_cpu_stat_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};

struct cgroup_stat {
	/* per-cpu statistics are collected into the following global counters */
	struct task_cputime cputime;
	struct prev_cputime prev_cputime;
};

struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;
@@ -354,6 +406,11 @@ struct cgroup {
	 */
	struct cgroup *dom_cgrp;

	/* cgroup basic resource statistics */
	struct cgroup_cpu_stat __percpu *cpu_stat;
	struct cgroup_stat pending_stat;	/* pending from children */
	struct cgroup_stat stat;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
@@ -513,6 +570,8 @@ struct cgroup_subsys {
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);

@@ -23,6 +23,7 @@
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

@@ -689,6 +690,63 @@ static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
						char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUPS

#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else	/* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif	/* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.

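For context on who calls these helpers: the scheduler is the expected user, charging the runtime delta each scheduling class just computed. A sketch modeled on update_curr()-style code (the surrounding variables are assumptions of the example, not part of this header):

	u64 delta_exec = now - curr->se.exec_start;

	curr->se.sum_exec_runtime += delta_exec;
	cgroup_account_cputime(curr, delta_exec);	/* cpuacct + cgroup2 stats */
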
@@ -682,10 +682,10 @@ struct clk_gpio {

extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned gpio, bool active_low,
		const char *parent_name, struct gpio_desc *gpiod,
		unsigned long flags);
struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned gpio, bool active_low,
		const char *parent_name, struct gpio_desc *gpiod,
		unsigned long flags);
void clk_hw_unregister_gpio_gate(struct clk_hw *hw);

@@ -701,11 +701,11 @@ void clk_hw_unregister_gpio_gate(struct clk_hw *hw);

extern const struct clk_ops clk_gpio_mux_ops;
struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents, unsigned gpio,
		bool active_low, unsigned long flags);
		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
		unsigned long flags);
struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents, unsigned gpio,
		bool active_low, unsigned long flags);
		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
		unsigned long flags);
void clk_hw_unregister_gpio_mux(struct clk_hw *hw);

/**
@@ -815,7 +815,12 @@ int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data);
int devm_of_clk_add_hw_provider(struct device *dev,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data);
void of_clk_del_provider(struct device_node *np);
void devm_of_clk_del_provider(struct device *dev);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -847,7 +852,15 @@ static inline int of_clk_add_hw_provider(struct device_node *np,
{
	return 0;
}
static inline int devm_of_clk_add_hw_provider(struct device *dev,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	return 0;
}
static inline void of_clk_del_provider(struct device_node *np) {}
static inline void devm_of_clk_del_provider(struct device *dev) {}
static inline struct clk *of_clk_src_simple_get(
	struct of_phandle_args *clkspec, void *data)
{

@@ -444,11 +444,6 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
	return lhs->tv_nsec - rhs->tv_nsec;
}

extern int get_compat_itimerspec(struct itimerspec *dst,
				 const struct compat_itimerspec __user *src);
extern int put_compat_itimerspec(struct compat_itimerspec __user *dst,
				 const struct itimerspec *src);

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
@@ -456,8 +451,9 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);

extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
extern int put_compat_sigset(compat_sigset_t __user *compat,
			     const sigset_t *set, unsigned int size);

asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
		compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,

@@ -16,3 +16,6 @@
 * with any version that can compile the kernel
 */
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#define randomized_struct_fields_start	struct {
#define randomized_struct_fields_end	};

@@ -58,7 +58,7 @@ struct config_item {
	struct list_head	ci_entry;
	struct config_item	*ci_parent;
	struct config_group	*ci_group;
	struct config_item_type	*ci_type;
	const struct config_item_type	*ci_type;
	struct dentry		*ci_dentry;
};

@@ -72,7 +72,7 @@ static inline char *config_item_name(struct config_item * item)

extern void config_item_init_type_name(struct config_item *item,
				       const char *name,
				       struct config_item_type *type);
				       const struct config_item_type *type);

extern struct config_item *config_item_get(struct config_item *);
extern struct config_item *config_item_get_unless_zero(struct config_item *);
@@ -101,7 +101,7 @@ struct config_group {
extern void config_group_init(struct config_group *group);
extern void config_group_init_type_name(struct config_group *group,
					const char *name,
					struct config_item_type *type);
					const struct config_item_type *type);

static inline struct config_group *to_config_group(struct config_item *item)
{
@@ -261,7 +261,7 @@ void configfs_remove_default_groups(struct config_group *group);
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
				const char *name,
				struct config_item_type *item_type);
				const struct config_item_type *item_type);
void configfs_unregister_default_group(struct config_group *group);

/* These functions can sleep and can alloc with GFP_KERNEL */

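The point of the constification above is that item types can now live in read-only memory. A sketch of the definition style this enables (my_item_ops and my_item are placeholders for a caller's own objects):

	static const struct config_item_type my_item_type = {
		.ct_owner	= THIS_MODULE,
		.ct_item_ops	= &my_item_ops,
	};

	config_item_init_type_name(&my_item, "example", &my_item_type);
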
@@ -22,7 +22,7 @@
#define __CONNECTOR_H


#include <linux/atomic.h>
#include <linux/refcount.h>

#include <linux/list.h>
#include <linux/workqueue.h>
@@ -49,7 +49,7 @@ struct cn_callback_id {

struct cn_callback_entry {
	struct list_head callback_entry;
	atomic_t refcnt;
	refcount_t refcnt;
	struct cn_queue_dev *pdev;

	struct cn_callback_id id;

@@ -917,8 +917,12 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
}
#endif

extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);

extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
				unsigned long max_freq);

/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;

@@ -155,6 +155,9 @@ enum cpuhp_state {
	CPUHP_AP_PERF_S390_SF_ONLINE,
	CPUHP_AP_PERF_ARM_CCI_ONLINE,
	CPUHP_AP_PERF_ARM_CCN_ONLINE,
	CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
	CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
	CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,

@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/completion.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -467,6 +468,45 @@ struct crypto_alg {
	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(struct crypto_async_request *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	};

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}

/*
 * Algorithm registration interface.
 */

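A usage sketch for the new wait helpers, assuming an already-allocated skcipher request: crypto_req_done() is installed as the completion callback and crypto_wait_req() turns the async -EINPROGRESS/-EBUSY outcomes into a synchronous wait:

	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	rc = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
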
@@ -157,6 +157,9 @@ struct cyclades_port {
	struct cyclades_icount	icount;
	struct completion	shutdown_wait;
	int throttle;
#ifdef CONFIG_CYZ_INTR
	struct timer_list	rx_full_timer;
#endif
};

#define	CLOSING_WAIT_DELAY	30*HZ

@@ -96,7 +96,9 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		const struct iomap_ops *ops);
		pfn_t *pfnp, const struct iomap_ops *ops);
int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);

@@ -1,13 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * debugfs.h - a tiny little debug file system
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * debugfs is for people to use instead of /proc or /sys.
 * See Documentation/filesystems/ for more details.
 */
@@ -23,7 +20,6 @@

struct device;
struct file_operations;
struct srcu_struct;

struct debugfs_blob_wrapper {
	void *data;
@@ -43,25 +39,6 @@ struct debugfs_regset32 {

extern struct dentry *arch_debugfs_dir;

extern struct srcu_struct debugfs_srcu;

/**
 * debugfs_real_fops - getter for the real file operation
 * @filp: a pointer to a struct file
 *
 * Must only be called under the protection established by
 * debugfs_use_file_start().
 */
static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
	__must_hold(&debugfs_srcu)
{
	/*
	 * Neither the pointer to the struct file_operations, nor its
	 * contents ever change -- srcu_dereference() is not needed here.
	 */
	return filp->f_path.dentry->d_fsdata;
}

#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
@@ -107,10 +84,10 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
void debugfs_remove_recursive(struct dentry *dentry);

int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
	__acquires(&debugfs_srcu);
const struct file_operations *debugfs_real_fops(const struct file *filp);

void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
int debugfs_file_get(struct dentry *dentry);
void debugfs_file_put(struct dentry *dentry);

ssize_t debugfs_attr_read(struct file *file, char __user *buf,
			  size_t len, loff_t *ppos);
@@ -239,15 +216,12 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }

static inline int debugfs_use_file_start(const struct dentry *dentry,
					 int *srcu_idx)
	__acquires(&debugfs_srcu)
static inline int debugfs_file_get(struct dentry *dentry)
{
	return 0;
}

static inline void debugfs_use_file_finish(int srcu_idx)
	__releases(&debugfs_srcu)
static inline void debugfs_file_put(struct dentry *dentry)
{ }

static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,

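The file_get/file_put pair replaces the SRCU scheme: instead of entering debugfs_srcu around every access, a proxy takes a per-file reference that removal waits on. Roughly how a read-side user looks after this change (a sketch mirroring the proxy pattern, not quoting it):

	const struct file_operations *real_fops;
	int r;

	r = debugfs_file_get(file->f_path.dentry);
	if (unlikely(r))
		return r;	/* the file is being removed */
	real_fops = debugfs_real_fops(file);
	r = real_fops->read(file, buf, len, ppos);
	debugfs_file_put(file->f_path.dentry);
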
@@ -19,6 +19,13 @@

#define DEVFREQ_NAME_LEN 16

/* DEVFREQ governor name */
#define DEVFREQ_GOV_SIMPLE_ONDEMAND	"simple_ondemand"
#define DEVFREQ_GOV_PERFORMANCE		"performance"
#define DEVFREQ_GOV_POWERSAVE		"powersave"
#define DEVFREQ_GOV_USERSPACE		"userspace"
#define DEVFREQ_GOV_PASSIVE		"passive"

/* DEVFREQ notifier interface */
#define DEVFREQ_TRANSITION_NOTIFIER	(0)

@@ -84,8 +91,9 @@ struct devfreq_dev_status {
 *			from devfreq_remove_device() call. If the user
 *			has registered devfreq->nb at a notifier-head,
 *			this is the time to unregister it.
 * @freq_table:		Optional list of frequencies to support statistics.
 * @max_state:		The size of freq_table.
 * @freq_table:		Optional list of frequencies to support statistics
 *			and freq_table must be generated in ascending order.
 * @max_state:		The size of freq_table.
 */
struct devfreq_dev_profile {
	unsigned long initial_freq;
@@ -120,6 +128,8 @@ struct devfreq_dev_profile {
 *		touch this.
 * @min_freq:	Limit minimum frequency requested by user (0: none)
 * @max_freq:	Limit maximum frequency requested by user (0: none)
 * @scaling_min_freq:	Limit minimum frequency requested by OPP interface
 * @scaling_max_freq:	Limit maximum frequency requested by OPP interface
 * @stop_polling:	devfreq polling status of a device.
 * @total_trans:	Number of devfreq transitions
 * @trans_table:	Statistics of devfreq transitions
@@ -153,6 +163,8 @@ struct devfreq {

	unsigned long min_freq;
	unsigned long max_freq;
	unsigned long scaling_min_freq;
	unsigned long scaling_max_freq;
	bool stop_polling;

	/* information for device frequency transition */

@@ -97,6 +97,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
 * @p:		The private data of the driver core, only the driver core can
 *		touch this.
 * @lock_key:	Lock class key for use by the lock validator
 * @force_dma:	Assume devices on this bus should be set up by dma_configure()
 *		even if DMA capability is not explicitly described by firmware.
 *
 * A bus is a channel between the processor and one or more devices. For the
 * purposes of the device model, all devices are connected via a bus, even if
@@ -135,6 +137,8 @@ struct bus_type {

	struct subsys_private *p;
	struct lock_class_key lock_key;

	bool force_dma;
};

extern int __must_check bus_register(struct bus_type *bus);
@@ -370,9 +374,6 @@ int subsys_virtual_register(struct bus_type *subsys,
 * @devnode:	Callback to provide the devtmpfs.
 * @class_release: Called to release this class.
 * @dev_release: Called to release the device.
 * @suspend:	Used to put the device to sleep mode, usually to a low power
 *		state.
 * @resume:	Used to bring the device from the sleep mode.
 * @shutdown_pre: Called at shut-down time before driver shutdown.
 * @ns_type:	Callbacks so sysfs can determine namespaces.
 * @namespace:	Namespace of the device belongs to this class.
@@ -400,8 +401,6 @@ struct class {
	void (*class_release)(struct class *class);
	void (*dev_release)(struct device *dev);

	int (*suspend)(struct device *dev, pm_message_t state);
	int (*resume)(struct device *dev);
	int (*shutdown_pre)(struct device *dev);

	const struct kobj_ns_type_operations *ns_type;
@@ -1075,6 +1074,16 @@ static inline void dev_pm_syscore_device(struct device *dev, bool val)
#endif
}

static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
{
	dev->power.driver_flags = flags;
}

static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
{
	return !!(dev->power.driver_flags & flags);
}

static inline void device_lock(struct device *dev)
{
	mutex_lock(&dev->mutex);

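The new driver_flags accessors are meant to be set once at probe time and tested later by the PM core. A sketch; DPM_FLAG_SMART_SUSPEND is the kind of flag introduced alongside this interface, but treat the specific name as an assumption of the example:

	/* at probe time */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);

	/* later, e.g. in PM core or subsystem code */
	if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		/* ... skip resuming a runtime-suspended device ... */;
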
@@ -1,17 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fs.h>
#include <linux/bpf-cgroup.h>

#define DEVCG_ACC_MKNOD	1
#define DEVCG_ACC_READ	2
#define DEVCG_ACC_WRITE	4
#define DEVCG_ACC_MASK	(DEVCG_ACC_MKNOD | DEVCG_ACC_READ | DEVCG_ACC_WRITE)

#define DEVCG_DEV_BLOCK	1
#define DEVCG_DEV_CHAR	2
#define DEVCG_DEV_ALL	4	/* this represents all devices */

#ifdef CONFIG_CGROUP_DEVICE
extern int __devcgroup_inode_permission(struct inode *inode, int mask);
extern int devcgroup_inode_mknod(int mode, dev_t dev);
extern int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access);
#else
static inline int __devcgroup_check_permission(short type, u32 major, u32 minor,
					       short access)
{ return 0; }
#endif

#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
					     short access)
{
	int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);

	if (rc)
		return -EPERM;

	return __devcgroup_check_permission(type, major, minor, access);
}

static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (likely(!inode->i_rdev))
		return 0;
	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))

	if (S_ISBLK(inode->i_mode))
		type = DEVCG_DEV_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		type = DEVCG_DEV_CHAR;
	else
		return 0;
	return __devcgroup_inode_permission(inode, mask);

	if (mask & MAY_WRITE)
		access |= DEVCG_ACC_WRITE;
	if (mask & MAY_READ)
		access |= DEVCG_ACC_READ;

	return devcgroup_check_permission(type, imajor(inode), iminor(inode),
					  access);
}

static inline int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEVCG_DEV_BLOCK;
	else
		type = DEVCG_DEV_CHAR;

	return devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
					  DEVCG_ACC_MKNOD);
}

#else
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{ return 0; }

@@ -128,7 +128,7 @@ struct dma_fence_cb {
 * implementation know that there is another driver waiting on
 * the signal (ie. hw->sw case).
 *
 * This function can be called called from atomic context, but not
 * This function can be called from atomic context, but not
 * from irq context, so normal spinlocks can be used.
 *
 * A return value of false indicates the fence already passed,
@@ -248,9 +248,12 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
	struct dma_fence *fence;

	fence = rcu_dereference(*fencep);
	if (!fence || !dma_fence_get_rcu(fence))
	if (!fence)
		return NULL;

	if (!dma_fence_get_rcu(fence))
		continue;

	/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
	 * provides a full memory barrier upon success (such as now).
	 * This is paired with the write barrier from assigning

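The split test only makes sense inside the retry loop that the hunk context hides: a NULL pointer means there is no fence to return, while a failed reference grab means the fence died under us and the pointer must be reloaded. Reconstructed shape of the function after this change, as a sketch that assumes the enclosing do/while loop around the visible lines:

	static inline struct dma_fence *
	dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
	{
		do {
			struct dma_fence *fence;

			fence = rcu_dereference(*fencep);
			if (!fence)
				return NULL;

			if (!dma_fence_get_rcu(fence))
				continue;	/* refcount hit zero; reload */

			/* did the pointer change while we took the ref? */
			if (fence == rcu_access_pointer(*fencep))
				return rcu_pointer_handoff(fence);

			dma_fence_put(fence);
		} while (1);
	}
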
@@ -9,7 +9,6 @@
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

@@ -127,6 +126,8 @@ struct dma_map_ops {
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -230,7 +231,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
@@ -263,11 +263,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;
	int ents;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
@@ -297,7 +294,6 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
@@ -437,6 +433,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

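The new ->cache_sync hook is intended for memory obtained with the non-consistent attribute, where the CPU caches must be flushed by hand before the device looks at the buffer. A hedged usage sketch (dev, size and the direction are the caller's):

	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
				DMA_ATTR_NON_CONSISTENT);
	/* ... CPU fills the buffer ... */
	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);	/* flush for device */
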
@@ -41,20 +41,6 @@ struct xilinx_vdma_config {
	int ext_fsync;
};

/**
 * enum xdma_ip_type: DMA IP type.
 *
 * XDMA_TYPE_AXIDMA: Axi dma ip.
 * XDMA_TYPE_CDMA: Axi cdma ip.
 * XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg);

@@ -329,7 +329,7 @@ enum dma_slave_buswidth {
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
@@ -404,14 +404,16 @@ enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/* struct dma_slave_caps - expose capabilities of a slave channel only
 *
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dstn addr widths the channel supports
 * @directions: bit mask of slave direction the channel supported
 * since the enum dma_transfer_direction is not defined as bits for each
 * type of direction, the dma controller should fill (1 << <TYPE>) and same
 * should be checked by controller as well
/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports.
 *	Width is specified in bytes, e.g. for a channel supporting
 *	a width of 4 the mask should have BIT(4) set.
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports.
 *	Since the enum dma_transfer_direction is not defined as bit flag for
 *	each type, the dma controller should set BIT(<TYPE>) and same
 *	should be checked by controller as well
 * @max_burst: max burst capability per-transfer
 * @cmd_pause: true, if pause and thereby resume is supported
 * @cmd_terminate: true, if terminate cmd is supported
@@ -678,11 +680,13 @@ struct dma_filter {
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 *	Width is specified in bytes, e.g. for a device supporting
 *	a width of 4 the mask should have BIT(4) set.
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave direction the device supports since
 * the enum dma_transfer_direction is not defined as bits for
 * each type of direction, the dma controller should fill (1 <<
 * <TYPE>) and same should be checked by controller as well
 * @directions: bit mask of slave directions the device supports.
 *	Since the enum dma_transfer_direction is not defined as bit flag for
 *	each type, the dma controller should set BIT(<TYPE>) and same
 *	should be checked by controller as well
 * @max_burst: max burst capability per-transfer
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status

@@ -112,6 +112,7 @@ static inline bool dmar_rcu_check(void)

extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);

include/linux/dsa/lan9303.h (new file, 38 lines)
@@ -0,0 +1,38 @@
/* Included by drivers/net/dsa/lan9303.h and net/dsa/tag_lan9303.c */
#include <linux/if_ether.h>

struct lan9303;

struct lan9303_phy_ops {
	/* PHY 1 and 2 access */
	int	(*phy_read)(struct lan9303 *chip, int port, int regnum);
	int	(*phy_write)(struct lan9303 *chip, int port,
			     int regnum, u16 val);
};

#define LAN9303_NUM_ALR_RECORDS 512
struct lan9303_alr_cache_entry {
	u8  mac_addr[ETH_ALEN];
	u8  port_map;		/* Bitmap of ports. Zero if unused entry */
	u8  stp_override;	/* non-zero if LAN9303_ALR_DAT1_AGE_OVERRID is set */
};

struct lan9303 {
	struct device *dev;
	struct regmap *regmap;
	struct regmap_irq_chip_data *irq_data;
	struct gpio_desc *reset_gpio;
	u32 reset_duration; /* in [ms] */
	bool phy_addr_sel_strap;
	struct dsa_switch *ds;
	struct mutex indirect_mutex; /* protect indexed register access */
	const struct lan9303_phy_ops *ops;
	bool is_bridged; /* true if port 1 and 2 are bridged */

	/* remember LAN9303_SWE_PORT_STATE while not bridged */
	u32 swe_port_state;
	/* The LAN9303 does not offer reading back a specific ALR entry.
	 * Cache all static entries in a flat table.
	 */
	struct lan9303_alr_cache_entry alr_cache[LAN9303_NUM_ALR_RECORDS];
};

@@ -99,7 +99,7 @@ void dql_completed(struct dql *dql, unsigned int count);
void dql_reset(struct dql *dql);

/* Initialize dql state */
int dql_init(struct dql *dql, unsigned hold_time);
void dql_init(struct dql *dql, unsigned int hold_time);

#endif /* _KERNEL_ */

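dql_init() losing its int return reflects that it can no longer fail, so callers that checked the result can drop that. Typical initialization, using HZ as the hold time in the style of the networking BQL users (a sketch):

	struct dql dql;

	dql_init(&dql, HZ);	/* no return value to check anymore */
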
@@ -145,6 +145,7 @@ struct elevator_type
	size_t icq_align;	/* ditto */
	struct elv_fs_entry *elevator_attrs;
	char elevator_name[ELV_NAME_MAX];
	const char *elevator_alias;
	struct module *elevator_owner;
	bool uses_mq;
#ifdef CONFIG_BLK_DEBUG_FS

@@ -66,6 +66,7 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base

/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local

@@ -164,6 +164,16 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
			     struct ethtool_link_ksettings *link_ksettings);

/**
 * ethtool_intersect_link_masks - Given two link masks, AND them together
 * @dst: first mask and where result is stored
 * @src: second mask to intersect with
 *
 * Given two link mode masks, AND them together and save the result in dst.
 */
void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
				  struct ethtool_link_ksettings *src);

void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
					     u32 legacy_u32);

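A usage sketch for the new helper: a MAC driver clamping its advertised link modes to what the attached PHY supports. Both masks are assumed to have been filled in beforehand by the respective drivers:

	struct ethtool_link_ksettings mac_caps = {}, phy_caps = {};

	/* ... populate mac_caps and phy_caps ... */
	ethtool_intersect_link_masks(&mac_caps, &phy_caps);
	/* mac_caps.link_modes now holds only the modes both ends support */
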
include/linux/extcon-provider.h (new file, 142 lines)
@@ -0,0 +1,142 @@
/*
 * External Connector (extcon) framework
 * - linux/include/linux/extcon-provider.h for extcon provider device driver.
 *
 * Copyright (C) 2017 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __LINUX_EXTCON_PROVIDER_H__
#define __LINUX_EXTCON_PROVIDER_H__

#include <linux/extcon.h>

struct extcon_dev;

#if IS_ENABLED(CONFIG_EXTCON)

/* Following APIs register/unregister the extcon device. */
extern int extcon_dev_register(struct extcon_dev *edev);
extern void extcon_dev_unregister(struct extcon_dev *edev);
extern int devm_extcon_dev_register(struct device *dev,
				    struct extcon_dev *edev);
extern void devm_extcon_dev_unregister(struct device *dev,
				       struct extcon_dev *edev);

/* Following APIs allocate/free the memory of the extcon device. */
extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
						   const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);

/* Synchronize the state and property value for each external connector. */
extern int extcon_sync(struct extcon_dev *edev, unsigned int id);

/*
 * Following APIs set the connected state of each external connector.
 * The 'id' argument indicates the defined external connector.
 */
extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
			    bool state);
extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
				 bool state);

/*
 * Following APIs set the property of each external connector.
 * The 'id' argument indicates the defined external connector
 * and the 'prop' indicates the extcon property.
 *
 * And extcon_set_property_capability() set the capability of the property
 * for each external connector. They are used to set the capability of the
 * property of each external connector based on the id and property.
 */
extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
			       unsigned int prop,
			       union extcon_property_value prop_val);
extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
				    unsigned int prop,
				    union extcon_property_value prop_val);
extern int extcon_set_property_capability(struct extcon_dev *edev,
					  unsigned int id, unsigned int prop);

#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
	return 0;
}

static inline void extcon_dev_unregister(struct extcon_dev *edev) { }

static inline int devm_extcon_dev_register(struct device *dev,
					   struct extcon_dev *edev)
{
	return -EINVAL;
}

static inline void devm_extcon_dev_unregister(struct device *dev,
					      struct extcon_dev *edev) { }

static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
	return ERR_PTR(-ENOSYS);
}

static inline void extcon_dev_free(struct extcon_dev *edev) { }

static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
						const unsigned int *cable)
{
	return ERR_PTR(-ENOSYS);
}

static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }


static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
				   bool state)
{
	return 0;
}

static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
					bool state)
{
	return 0;
}

static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
	return 0;
}

static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
				      unsigned int prop,
				      union extcon_property_value prop_val)
{
	return 0;
}

static inline int extcon_set_property_sync(struct extcon_dev *edev,
					   unsigned int id, unsigned int prop,
					   union extcon_property_value prop_val)
{
	return 0;
}

static inline int extcon_set_property_capability(struct extcon_dev *edev,
						 unsigned int id, unsigned int prop)
{
	return 0;
}
#endif /* CONFIG_EXTCON */
#endif /* __LINUX_EXTCON_PROVIDER_H__ */

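Provider-side usage of the split header, sketched for a hypothetical USB-detection driver; the cable list, device pointer and detection site are assumptions of the example:

	static const unsigned int my_cables[] = {
		EXTCON_USB,
		EXTCON_USB_HOST,
		EXTCON_NONE,	/* terminator */
	};

	edev = devm_extcon_dev_allocate(dev, my_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);
	ret = devm_extcon_dev_register(dev, edev);
	if (ret)
		return ret;
	/* when a cable is detected: */
	extcon_set_state_sync(edev, EXTCON_USB, true);
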
@@ -1,5 +1,6 @@
/*
 * External Connector (extcon) framework
 * - linux/include/linux/extcon.h for extcon consumer device driver.
 *
 * Copyright (C) 2015 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -170,61 +171,29 @@ union extcon_property_value {
	int intval;	/* type : integer (intval) */
};

struct extcon_cable;
struct extcon_dev;

#if IS_ENABLED(CONFIG_EXTCON)

/* Following APIs register/unregister the extcon device. */
extern int extcon_dev_register(struct extcon_dev *edev);
extern void extcon_dev_unregister(struct extcon_dev *edev);
extern int devm_extcon_dev_register(struct device *dev,
				    struct extcon_dev *edev);
extern void devm_extcon_dev_unregister(struct device *dev,
				       struct extcon_dev *edev);

/* Following APIs allocate/free the memory of the extcon device. */
extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
						   const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);

/* Synchronize the state and property value for each external connector. */
extern int extcon_sync(struct extcon_dev *edev, unsigned int id);

/*
 * Following APIs get/set the connected state of each external connector.
 * Following APIs get the connected state of each external connector.
 * The 'id' argument indicates the defined external connector.
 */
extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
			    bool state);
extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
				 bool state);

/*
 * Following APIs get/set the property of each external connector.
 * Following APIs get the property of each external connector.
 * The 'id' argument indicates the defined external connector
 * and the 'prop' indicates the extcon property.
 *
 * And extcon_get/set_property_capability() set the capability of the property
 * for each external connector. They are used to set the capability of the
 * And extcon_get_property_capability() get the capability of the property
 * for each external connector. They are used to get the capability of the
 * property of each external connector based on the id and property.
 */
extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
			       unsigned int prop,
			       union extcon_property_value *prop_val);
extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
			       unsigned int prop,
			       union extcon_property_value prop_val);
extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
				    unsigned int prop,
				    union extcon_property_value prop_val);
extern int extcon_get_property_capability(struct extcon_dev *edev,
					  unsigned int id, unsigned int prop);
extern int extcon_set_property_capability(struct extcon_dev *edev,
					  unsigned int id, unsigned int prop);

/*
 * Following APIs register the notifier block in order to detect
@@ -268,79 +237,17 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
extern const char *extcon_get_edev_name(struct extcon_dev *edev);

#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
	return 0;
}

static inline void extcon_dev_unregister(struct extcon_dev *edev) { }

static inline int devm_extcon_dev_register(struct device *dev,
					   struct extcon_dev *edev)
{
	return -EINVAL;
}

static inline void devm_extcon_dev_unregister(struct device *dev,
					      struct extcon_dev *edev) { }

static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
	return ERR_PTR(-ENOSYS);
}

static inline void extcon_dev_free(struct extcon_dev *edev) { }

static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
						const unsigned int *cable)
{
	return ERR_PTR(-ENOSYS);
}

static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }


static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
{
	return 0;
}

static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
				   bool state)
{
	return 0;
}

static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
					bool state)
{
	return 0;
}

static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
	return 0;
}

static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
				      unsigned int prop,
				      union extcon_property_value *prop_val)
{
	return 0;
}
static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
				      unsigned int prop,
				      union extcon_property_value prop_val)
{
	return 0;
}

static inline int extcon_set_property_sync(struct extcon_dev *edev,
					   unsigned int id, unsigned int prop,
					   union extcon_property_value prop_val)
{
	return 0;
}

static inline int extcon_get_property_capability(struct extcon_dev *edev,
						 unsigned int id, unsigned int prop)
@@ -348,12 +255,6 @@ static inline int extcon_get_property_capability(struct extcon_dev *edev,
	return 0;
}

static inline int extcon_set_property_capability(struct extcon_dev *edev,
						 unsigned int id, unsigned int prop)
{
	return 0;
}

static inline int extcon_register_notifier(struct extcon_dev *edev,
					   unsigned int id, struct notifier_block *nb)
{

@@ -36,6 +36,8 @@
#define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
#define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)

#define F2FS_MAX_QUOTAS		3

#define F2FS_IO_SIZE(sbi)	(1 << (sbi)->write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi)	(1 << ((sbi)->write_io_size_bits + 2)) /* KB */
#define F2FS_IO_SIZE_BYTES(sbi)	(1 << ((sbi)->write_io_size_bits + 12)) /* B */
@@ -108,7 +110,8 @@ struct f2fs_super_block {
	__u8 encryption_level;		/* versioning level for encryption */
	__u8 encrypt_pw_salt[16];	/* Salt used for string2key algorithm */
	struct f2fs_device devs[MAX_DEVICES];	/* device list */
	__u8 reserved[327];		/* valid reserved region */
	__le32 qf_ino[F2FS_MAX_QUOTAS];	/* quota inode numbers */
	__u8 reserved[315];		/* valid reserved region */
} __packed;

/*
@@ -184,7 +187,8 @@ struct f2fs_extent {
} __packed;

#define F2FS_NAME_LEN		255
#define F2FS_INLINE_XATTR_ADDRS	50	/* 200 bytes for inline xattrs */
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS	50
#define DEF_ADDRS_PER_INODE	923	/* Address Pointers in an Inode */
#define CUR_ADDRS_PER_INODE(inode)	(DEF_ADDRS_PER_INODE - \
					get_extra_isize(inode))
@@ -238,7 +242,7 @@ struct f2fs_inode {
	union {
		struct {
			__le16 i_extra_isize;	/* extra inode attribute size */
			__le16 i_padding;	/* padding */
			__le16 i_inline_xattr_size;	/* inline xattr size, unit: 4 bytes */
			__le32 i_projid;	/* project id */
			__le32 i_inode_checksum;/* inode meta checksum */
			__le32 i_extra_end[0];	/* for attribute size calculation */

@@ -454,13 +454,11 @@ struct bpf_binary_header {

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				locked:1,	/* Program image locked? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	enum bpf_prog_type	type;		/* Type of BPF program */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
@@ -482,30 +480,36 @@ struct sk_filter {
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
};

/* compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf, act_bpf and lwt programs
/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_end(struct sk_buff *skb)
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_end = skb->data + skb_headlen(skb);
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end  = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
@@ -726,8 +730,22 @@ int xdp_do_redirect(struct net_device *dev,
		    struct bpf_prog *prog);
void xdp_do_flush_map(void);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

void bpf_warn_invalid_xdp_action(u32 act);
void bpf_warn_invalid_xdp_redirect(u32 ifindex);

struct sock *do_sk_redirect_map(struct sk_buff *skb);

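Driver-side sketch of the metadata helpers: a driver that reserves no headroom for metadata marks the buffer invalid up front (data_meta is set past data, which xdp_data_meta_unsupported() detects), and setup code can then refuse adjust-meta requests. Variable names here are illustrative:

	struct xdp_buff xdp;

	xdp.data_hard_start = page_address(page);
	xdp.data = xdp.data_hard_start + rx_headroom;
	xdp_set_data_meta_invalid(&xdp);	/* no meta room in this driver */
	xdp.data_end = xdp.data + pkt_len;

	/* elsewhere, when a program asks for metadata: */
	if (xdp_data_meta_unsupported(&xdp))
		return -EOPNOTSUPP;
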
@@ -182,7 +182,7 @@ static inline void freezable_schedule_unsafe(void)
}

/*
 * Like freezable_schedule_timeout(), but should not block the freezer. Do not
 * Like schedule_timeout(), but should not block the freezer. Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)

@@ -971,8 +971,8 @@ struct lock_manager {
struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(struct net *);
int opens_in_grace(struct net *);
bool locks_in_grace(struct net *);
bool opens_in_grace(struct net *);

/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>
@@ -1702,6 +1702,7 @@ struct file_operations {
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	unsigned long mmap_supported_flags;
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *, fl_owner_t id);
	int (*release) (struct inode *, struct file *);
@@ -1854,6 +1855,7 @@ struct super_operations {
#else
#define S_DAX 0 /* Make all the DAX code disappear */
#endif
#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */

/*
 * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1893,6 +1895,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)

#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
			    (inode)->i_rdev == WHITEOUT_DEV)
@@ -2095,9 +2098,18 @@ struct file_system_type {
extern struct dentry *mount_ns(struct file_system_type *fs_type,
	int flags, void *data, void *ns, struct user_namespace *user_ns,
	int (*fill_super)(struct super_block *, void *, int));
#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int));
#else
static inline struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	return ERR_PTR(-ENODEV);
}
#endif
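The new !CONFIG_BLOCK stub keeps block-based mount helpers callable from code
that is compiled either way; the failure moves from link time to a clean
-ENODEV at run time. A sketch of a caller relying on that (myfs_mount and
myfs_fill_super are hypothetical names):

	/* Hypothetical ->mount: builds with or without CONFIG_BLOCK and
	 * simply fails with -ENODEV on kernels without block support. */
	static struct dentry *myfs_mount(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
	{
		return mount_bdev(fs_type, flags, dev_name, data,
				  myfs_fill_super);
	}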
extern struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
@@ -2106,7 +2118,14 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type,
	int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void generic_shutdown_super(struct super_block *sb);
#ifdef CONFIG_BLOCK
void kill_block_super(struct super_block *sb);
#else
static inline void kill_block_super(struct super_block *sb)
{
	BUG();
}
#endif
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
@@ -2170,7 +2189,6 @@ extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);

include/linux/fscrypt.h (new file, 294 lines)
@@ -0,0 +1,294 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fscrypt.h: declarations for per-file encryption
 *
 * Filesystems that implement per-file encryption include this header
 * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem
 * is being built with encryption support or not.
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * Written by Michael Halcrow, 2015.
 * Modified by Jaegeuk Kim, 2015.
 */
#ifndef _LINUX_FSCRYPT_H
#define _LINUX_FSCRYPT_H

#include <linux/key.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <crypto/skcipher.h>
#include <uapi/linux/fs.h>

#define FS_CRYPTO_BLOCK_SIZE 16

struct fscrypt_info;

struct fscrypt_ctx {
	union {
		struct {
			struct page *bounce_page; /* Ciphertext page */
			struct page *control_page; /* Original page */
		} w;
		struct {
			struct bio *bio;
			struct work_struct work;
		} r;
		struct list_head free_list; /* Free list */
	};
	u8 flags; /* Flags */
};

/**
 * For encrypted symlinks, the ciphertext length is stored at the beginning
 * of the string in little-endian format.
 */
struct fscrypt_symlink_data {
	__le16 len;
	char encrypted_path[1];
} __packed;

struct fscrypt_str {
	unsigned char *name;
	u32 len;
};

struct fscrypt_name {
	const struct qstr *usr_fname;
	struct fscrypt_str disk_name;
	u32 hash;
	u32 minor_hash;
	struct fscrypt_str crypto_buf;
};

#define FSTR_INIT(n, l) { .name = n, .len = l }
#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
#define fname_name(p) ((p)->disk_name.name)
#define fname_len(p) ((p)->disk_name.len)

/*
 * fscrypt superblock flags
 */
#define FS_CFLG_OWN_PAGES (1U << 1)

/*
 * crypto operations for filesystems
 */
struct fscrypt_operations {
	unsigned int flags;
	const char *key_prefix;
	int (*get_context)(struct inode *, void *, size_t);
	int (*set_context)(struct inode *, const void *, size_t, void *);
	bool (*dummy_context)(struct inode *);
	bool (*empty_dir)(struct inode *);
	unsigned (*max_namelen)(struct inode *);
};

/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28

static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
	if (inode->i_sb->s_cop->dummy_context &&
	    inode->i_sb->s_cop->dummy_context(inode))
		return true;
	return false;
}

static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
					   u32 filenames_mode)
{
	if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
	    filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
		return true;

	if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
	    filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
		return true;

	return false;
}

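fscrypt_valid_enc_modes() accepts exactly two pairings: AES-128-CBC contents
with AES-128-CTS filenames, or AES-256-XTS contents with AES-256-CTS
filenames. A policy-setting path would typically reject anything else up
front; a sketch (myfs_check_policy_modes is a hypothetical helper):

	/* Hypothetical validation before persisting an encryption policy. */
	static int myfs_check_policy_modes(u32 contents_mode, u32 filenames_mode)
	{
		if (!fscrypt_valid_enc_modes(contents_mode, filenames_mode))
			return -EINVAL; /* unsupported mode combination */
		return 0;
	}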
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
	if (str->len == 1 && str->name[0] == '.')
		return true;

	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
		return true;

	return false;
}

#if __FS_HAS_ENCRYPTION

static inline struct page *fscrypt_control_page(struct page *page)
{
	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
}

static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
	return (inode->i_crypt_info != NULL);
}

#include <linux/fscrypt_supp.h>

#else /* !__FS_HAS_ENCRYPTION */

static inline struct page *fscrypt_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}

static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
	return 0;
}

#include <linux/fscrypt_notsupp.h>
#endif /* __FS_HAS_ENCRYPTION */

/**
 * fscrypt_require_key - require an inode's encryption key
 * @inode: the inode we need the key for
 *
 * If the inode is encrypted, set up its encryption key if not already done.
 * Then require that the key be present and return -ENOKEY otherwise.
 *
 * No locks are needed, and the key will live as long as the struct inode --- so
 * it won't go away from under you.
 *
 * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
 * if a problem occurred while setting up the encryption key.
 */
static inline int fscrypt_require_key(struct inode *inode)
{
	if (IS_ENCRYPTED(inode)) {
		int err = fscrypt_get_encryption_info(inode);

		if (err)
			return err;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	return 0;
}

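fscrypt_require_key() is the primitive the hooks below build on; any operation
that needs plaintext access gates on it first. A sketch of a filesystem write
path using it (myfs_begin_write is a hypothetical name):

	/* Hypothetical: refuse to modify an encrypted file whose key is
	 * not present in the keyring. */
	static int myfs_begin_write(struct inode *inode)
	{
		int err = fscrypt_require_key(inode); /* 0 or -ENOKEY/-errno */

		if (err)
			return err;
		/* ... proceed with the normal write path ... */
		return 0;
	}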
/**
 * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory
 * @old_dentry: an existing dentry for the inode being linked
 * @dir: the target directory
 * @dentry: negative dentry for the target filename
 *
 * A new link can only be added to an encrypted directory if the directory's
 * encryption key is available --- since otherwise we'd have no way to encrypt
 * the filename. Therefore, we first set up the directory's encryption key (if
 * not already done) and return an error if it's unavailable.
 *
 * We also verify that the link will not violate the constraint that all files
 * in an encrypted directory tree use the same encryption policy.
 *
 * Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
 * -EPERM if the link would result in an inconsistent encryption policy, or
 * another -errno code.
 */
static inline int fscrypt_prepare_link(struct dentry *old_dentry,
				       struct inode *dir,
				       struct dentry *dentry)
{
	if (IS_ENCRYPTED(dir))
		return __fscrypt_prepare_link(d_inode(old_dentry), dir);
	return 0;
}

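The prepare_* hooks let a filesystem delegate all fscrypt policy checks with
one call at the top of the corresponding inode operation. A sketch for ->link
(myfs_link is a hypothetical name):

	/* Hypothetical ->link: policy checks first, then the usual work. */
	static int myfs_link(struct dentry *old_dentry, struct inode *dir,
			     struct dentry *dentry)
	{
		int err = fscrypt_prepare_link(old_dentry, dir, dentry);

		if (err)
			return err;
		/* ... filesystem-specific link implementation ... */
		return 0;
	}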
/**
 * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories
 * @old_dir: source directory
 * @old_dentry: dentry for source file
 * @new_dir: target directory
 * @new_dentry: dentry for target location (may be negative unless exchanging)
 * @flags: rename flags (we care at least about %RENAME_EXCHANGE)
 *
 * Prepare for ->rename() where the source and/or target directories may be
 * encrypted. A new link can only be added to an encrypted directory if the
 * directory's encryption key is available --- since otherwise we'd have no way
 * to encrypt the filename. A rename to an existing name, on the other hand,
 * *is* cryptographically possible without the key. However, we take the more
 * conservative approach and just forbid all no-key renames.
 *
 * We also verify that the rename will not violate the constraint that all files
 * in an encrypted directory tree use the same encryption policy.
 *
 * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
 * rename would cause inconsistent encryption policies, or another -errno code.
 */
static inline int fscrypt_prepare_rename(struct inode *old_dir,
					 struct dentry *old_dentry,
					 struct inode *new_dir,
					 struct dentry *new_dentry,
					 unsigned int flags)
{
	if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir))
		return __fscrypt_prepare_rename(old_dir, old_dentry,
						new_dir, new_dentry, flags);
	return 0;
}

/**
 * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory
 * @dir: directory being searched
 * @dentry: filename being looked up
 * @flags: lookup flags
 *
 * Prepare for ->lookup() in a directory which may be encrypted. Lookups can be
 * done with or without the directory's encryption key; without the key,
 * filenames are presented in encrypted form. Therefore, we'll try to set up
 * the directory's encryption key, but even without it the lookup can continue.
 *
 * To allow invalidating stale dentries if the directory's encryption key is
 * added later, we also install a custom ->d_revalidate() method and use the
 * DCACHE_ENCRYPTED_WITH_KEY flag to indicate whether a given dentry is a
 * plaintext name (flag set) or a ciphertext name (flag cleared).
 *
 * Return: 0 on success, -errno if a problem occurred while setting up the
 * encryption key
 */
static inline int fscrypt_prepare_lookup(struct inode *dir,
					 struct dentry *dentry,
					 unsigned int flags)
{
	if (IS_ENCRYPTED(dir))
		return __fscrypt_prepare_lookup(dir, dentry);
	return 0;
}

/**
 * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes
 * @dentry: dentry through which the inode is being changed
 * @attr: attributes to change
 *
 * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file,
 * most attribute changes are allowed even without the encryption key. However,
 * without the encryption key we do have to forbid truncates. This is needed
 * because the size being truncated to may not be a multiple of the filesystem
 * block size, and in that case we'd have to decrypt the final block, zero the
 * portion past i_size, and re-encrypt it. (We *could* allow truncating to a
 * filesystem block boundary, but it's simpler to just forbid all truncates ---
 * and we already forbid all other contents modifications without the key.)
 *
 * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
 * if a problem occurred while setting up the encryption key.
 */
static inline int fscrypt_prepare_setattr(struct dentry *dentry,
					  struct iattr *attr)
{
	if (attr->ia_valid & ATTR_SIZE)
		return fscrypt_require_key(d_inode(dentry));
	return 0;
}

#endif /* _LINUX_FSCRYPT_H */

include/linux/fscrypt_common.h (deleted)
@@ -1,142 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fscrypt_common.h: common declarations for per-file encryption
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * Written by Michael Halcrow, 2015.
 * Modified by Jaegeuk Kim, 2015.
 */

#ifndef _LINUX_FSCRYPT_COMMON_H
#define _LINUX_FSCRYPT_COMMON_H

#include <linux/key.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <crypto/skcipher.h>
#include <uapi/linux/fs.h>

#define FS_CRYPTO_BLOCK_SIZE 16

struct fscrypt_info;

struct fscrypt_ctx {
	union {
		struct {
			struct page *bounce_page; /* Ciphertext page */
			struct page *control_page; /* Original page */
		} w;
		struct {
			struct bio *bio;
			struct work_struct work;
		} r;
		struct list_head free_list; /* Free list */
	};
	u8 flags; /* Flags */
};

/**
 * For encrypted symlinks, the ciphertext length is stored at the beginning
 * of the string in little-endian format.
 */
struct fscrypt_symlink_data {
	__le16 len;
	char encrypted_path[1];
} __packed;

struct fscrypt_str {
	unsigned char *name;
	u32 len;
};

struct fscrypt_name {
	const struct qstr *usr_fname;
	struct fscrypt_str disk_name;
	u32 hash;
	u32 minor_hash;
	struct fscrypt_str crypto_buf;
};

#define FSTR_INIT(n, l) { .name = n, .len = l }
#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
#define fname_name(p) ((p)->disk_name.name)
#define fname_len(p) ((p)->disk_name.len)

/*
 * fscrypt superblock flags
 */
#define FS_CFLG_OWN_PAGES (1U << 1)

/*
 * crypto opertions for filesystems
 */
struct fscrypt_operations {
	unsigned int flags;
	const char *key_prefix;
	int (*get_context)(struct inode *, void *, size_t);
	int (*set_context)(struct inode *, const void *, size_t, void *);
	bool (*dummy_context)(struct inode *);
	bool (*is_encrypted)(struct inode *);
	bool (*empty_dir)(struct inode *);
	unsigned (*max_namelen)(struct inode *);
};

/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28

static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
	if (inode->i_sb->s_cop->dummy_context &&
	    inode->i_sb->s_cop->dummy_context(inode))
		return true;
	return false;
}

static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
					   u32 filenames_mode)
{
	if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
	    filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
		return true;

	if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
	    filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
		return true;

	return false;
}

static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
	if (str->len == 1 && str->name[0] == '.')
		return true;

	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
		return true;

	return false;
}

static inline struct page *fscrypt_control_page(struct page *page)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
#else
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
#endif
}

static inline int fscrypt_has_encryption_key(const struct inode *inode)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
	return (inode->i_crypt_info != NULL);
#else
	return 0;
#endif
}

#endif /* _LINUX_FSCRYPT_COMMON_H */

@@ -4,13 +4,16 @@
 *
 * This stubs out the fscrypt functions for filesystems configured without
 * encryption support.
 *
 * Do not include this file directly. Use fscrypt.h instead!
 */
#ifndef _LINUX_FSCRYPT_H
#error "Incorrect include of linux/fscrypt_notsupp.h!"
#endif

#ifndef _LINUX_FSCRYPT_NOTSUPP_H
#define _LINUX_FSCRYPT_NOTSUPP_H

#include <linux/fscrypt_common.h>

/* crypto.c */
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
						  gfp_t gfp_flags)
@@ -98,7 +101,7 @@ static inline int fscrypt_setup_filename(struct inode *dir,
					 const struct qstr *iname,
					 int lookup, struct fscrypt_name *fname)
{
	if (dir->i_sb->s_cop->is_encrypted(dir))
	if (IS_ENCRYPTED(dir))
		return -EOPNOTSUPP;

	memset(fname, 0, sizeof(struct fscrypt_name));
@@ -175,4 +178,34 @@ static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
	return -EOPNOTSUPP;
}

/* hooks.c */

static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
{
	if (IS_ENCRYPTED(inode))
		return -EOPNOTSUPP;
	return 0;
}

static inline int __fscrypt_prepare_link(struct inode *inode,
					 struct inode *dir)
{
	return -EOPNOTSUPP;
}

static inline int __fscrypt_prepare_rename(struct inode *old_dir,
					   struct dentry *old_dentry,
					   struct inode *new_dir,
					   struct dentry *new_dentry,
					   unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline int __fscrypt_prepare_lookup(struct inode *dir,
					   struct dentry *dentry)
{
	return -EOPNOTSUPP;
}

#endif /* _LINUX_FSCRYPT_NOTSUPP_H */

@@ -2,14 +2,15 @@
/*
 * fscrypt_supp.h
 *
 * This is included by filesystems configured with encryption support.
 * Do not include this file directly. Use fscrypt.h instead!
 */
#ifndef _LINUX_FSCRYPT_H
#error "Incorrect include of linux/fscrypt_supp.h!"
#endif

#ifndef _LINUX_FSCRYPT_SUPP_H
#define _LINUX_FSCRYPT_SUPP_H

#include <linux/fscrypt_common.h>

/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
@@ -143,4 +144,14 @@ extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
				 unsigned int);

/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
extern int __fscrypt_prepare_rename(struct inode *old_dir,
				    struct dentry *old_dentry,
				    struct inode *new_dir,
				    struct dentry *new_dentry,
				    unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);

#endif /* _LINUX_FSCRYPT_SUPP_H */

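fscrypt.h selects between the supp and notsupp halves via __FS_HAS_ENCRYPTION,
so a filesystem defines that macro before the include and one source tree
builds both ways. A sketch of the include site (CONFIG_MYFS_FS_ENCRYPTION is a
hypothetical Kconfig symbol):

	/* Hypothetical filesystem-private header: pick the real fscrypt API
	 * or the -EOPNOTSUPP stubs depending on the fs's Kconfig option. */
	#if IS_ENABLED(CONFIG_MYFS_FS_ENCRYPTION)
	#define __FS_HAS_ENCRYPTION 1
	#else
	#define __FS_HAS_ENCRYPTION 0
	#endif
	#include <linux/fscrypt.h>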
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>

/*
 * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
@@ -136,7 +137,7 @@ struct fsnotify_group {
	 * inotify_init() and the refcnt will hit 0 only when that fd has been
	 * closed.
	 */
	atomic_t refcnt; /* things with interest in this group */
	refcount_t refcnt; /* things with interest in this group */

	const struct fsnotify_ops *ops; /* how this group handles things */

@@ -183,14 +184,13 @@ struct fsnotify_group {
#endif
#ifdef CONFIG_FANOTIFY
	struct fanotify_group_private_data {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
		/* allows a group to block waiting for a userspace response */
		struct list_head access_list;
		wait_queue_head_t access_waitq;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
		int f_flags;
		unsigned int max_marks;
		struct user_struct *user;
		bool audit;
	} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -244,7 +244,7 @@ struct fsnotify_mark {
	__u32 mask;
	/* We hold one for presence in g_list. Also one ref for each 'thing'
	 * in kernel that found and may be using this mark. */
	atomic_t refcnt;
	refcount_t refcnt;
	/* Group this mark is for. Set on mark creation, stable until last ref
	 * is dropped */
	struct fsnotify_group *group;

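The atomic_t to refcount_t conversions give these reference counts overflow
and underflow checking for free; call sites change mechanically from
atomic_inc()/atomic_dec_and_test() to the refcount equivalents. A sketch of
the resulting pattern (myfs_put_group and the bare kfree() stand in for the
real fsnotify teardown):

	/* Typical put path after the conversion: refcount_dec_and_test()
	 * saturates and warns instead of silently wrapping. */
	static void myfs_put_group(struct fsnotify_group *group)
	{
		if (refcount_dec_and_test(&group->refcnt))
			kfree(group); /* hypothetical teardown */
	}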
@@ -52,6 +52,30 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
@@ -79,10 +103,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *             and passed to the callback. If this flag is set, but the
 *             architecture does not support passing regs
@@ -126,21 +146,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
enum {
	FTRACE_OPS_FL_ENABLED = 1 << 0,
	FTRACE_OPS_FL_DYNAMIC = 1 << 1,
	FTRACE_OPS_FL_PER_CPU = 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
	FTRACE_OPS_FL_STUB = 1 << 6,
	FTRACE_OPS_FL_INITIALIZED = 1 << 7,
	FTRACE_OPS_FL_DELETED = 1 << 8,
	FTRACE_OPS_FL_ADDING = 1 << 9,
	FTRACE_OPS_FL_REMOVING = 1 << 10,
	FTRACE_OPS_FL_MODIFYING = 1 << 11,
	FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
	FTRACE_OPS_FL_IPMODIFY = 1 << 13,
	FTRACE_OPS_FL_PID = 1 << 14,
	FTRACE_OPS_FL_RCU = 1 << 15,
	FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
	FTRACE_OPS_FL_SAVE_REGS = 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3,
	FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4,
	FTRACE_OPS_FL_STUB = 1 << 5,
	FTRACE_OPS_FL_INITIALIZED = 1 << 6,
	FTRACE_OPS_FL_DELETED = 1 << 7,
	FTRACE_OPS_FL_ADDING = 1 << 8,
	FTRACE_OPS_FL_REMOVING = 1 << 9,
	FTRACE_OPS_FL_MODIFYING = 1 << 10,
	FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11,
	FTRACE_OPS_FL_IPMODIFY = 1 << 12,
	FTRACE_OPS_FL_PID = 1 << 13,
	FTRACE_OPS_FL_RCU = 1 << 14,
	FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15,
};

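Dropping FTRACE_OPS_FL_PER_CPU compacts the remaining bits; since these flags
are purely in-kernel, renumbering them is safe, and users that combine them by
name are unaffected. A sketch of a typical user (my_callback is hypothetical):

	/* Hypothetical tracer: flags are combined by name, so the bit
	 * renumbering above is invisible to ftrace_ops users. */
	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* inspect regs here; SAVE_REGS guarantees they are passed */
	}

	static struct ftrace_ops my_ops = {
		.func = my_callback,
		.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_RECURSION_SAFE,
	};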
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -152,8 +171,10 @@ struct ftrace_ops_hash {
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
@@ -173,7 +194,6 @@ struct ftrace_ops {
	unsigned long flags;
	void *private;
	ftrace_func_t saved_func;
	int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash local_hash;
	struct ftrace_ops_hash *func_hash;
@@ -205,55 +225,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

/**
 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
	return *this_cpu_ptr(ops->disabled);
}

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

@@ -271,6 +242,7 @@ static inline int ftrace_nr_registered_ops(void)
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
@@ -743,7 +715,8 @@ static inline unsigned long get_lock_parent_ip(void)
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
#if defined(CONFIG_PREEMPT_TRACER) || \
	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
@@ -68,7 +68,7 @@ struct fwnode_reference_args {
 * @graph_parse_endpoint: Parse endpoint for port and endpoint id.
 */
struct fwnode_operations {
	void (*get)(struct fwnode_handle *fwnode);
	struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
	void (*put)(struct fwnode_handle *fwnode);
	bool (*device_is_available)(const struct fwnode_handle *fwnode);
	bool (*property_present)(const struct fwnode_handle *fwnode,
@@ -32,6 +32,7 @@

#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/atomic.h>

struct device;
struct device_node;
@@ -71,7 +72,7 @@ struct gen_pool {
 */
struct gen_pool_chunk {
	struct list_head next_chunk; /* next chunk in pool */
	atomic_t avail;
	atomic_long_t avail;
	phys_addr_t phys_addr; /* physical starting address of memory chunk */
	unsigned long start_addr; /* start address of memory chunk */
	unsigned long end_addr; /* end address of memory chunk (inclusive) */
@@ -141,6 +141,7 @@ struct hd_struct {
#define GENHD_FL_NATIVE_CAPACITY 128
#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
#define GENHD_FL_NO_PART_SCAN 512
#define GENHD_FL_HIDDEN 1024

enum {
	DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
@@ -236,7 +237,7 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)

static inline dev_t disk_devt(struct gendisk *disk)
{
	return disk_to_dev(disk)->devt;
	return MKDEV(disk->major, disk->first_minor);
}

static inline dev_t part_devt(struct hd_struct *part)
@@ -244,6 +245,7 @@ static inline dev_t part_devt(struct hd_struct *part)
	return part_to_dev(part)->devt;
}

extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);

static inline void disk_put_part(struct hd_struct *part)
@@ -24,7 +24,6 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
@@ -37,7 +36,6 @@ struct vm_area_struct;
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_WRITE 0x800000u
#define ___GFP_KSWAPD_RECLAIM 0x1000000u
@@ -193,27 +191,15 @@ struct vm_area_struct;
/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect to be used in the near
 * future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP address compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
@@ -539,8 +525,8 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
extern void free_unref_page(struct page *page);
extern void free_unref_page_list(struct list_head *list);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
@@ -29,6 +29,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)

/**
 * Optional flags that can be passed to one of gpiod_* to configure direction
@@ -40,6 +41,11 @@ enum gpiod_flags {
	GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
	GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
			 GPIOD_FLAGS_BIT_DIR_VAL,
	GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
			GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN,
	GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
			GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
			GPIOD_FLAGS_BIT_OPEN_DRAIN,
};

#ifdef CONFIG_GPIOLIB
@@ -100,10 +106,15 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);

/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
int gpiod_get_array_value(unsigned int array_size,
			  struct gpio_desc **desc_array, int *value_array);
void gpiod_set_value(struct gpio_desc *desc, int value);
void gpiod_set_array_value(unsigned int array_size,
			   struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
int gpiod_get_raw_array_value(unsigned int array_size,
			      struct gpio_desc **desc_array,
			      int *value_array);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value(unsigned int array_size,
			       struct gpio_desc **desc_array,
@@ -111,11 +122,17 @@ void gpiod_set_raw_array_value(unsigned int array_size,

/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_array_value_cansleep(unsigned int array_size,
				   struct gpio_desc **desc_array,
				   int *value_array);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_array_value_cansleep(unsigned int array_size,
				    struct gpio_desc **desc_array,
				    int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
				       struct gpio_desc **desc_array,
				       int *value_array);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
					struct gpio_desc **desc_array,
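The new *_array_value() variants read or write a whole bank of descriptors in
one call instead of looping over gpiod_get_value()/gpiod_set_value(). A sketch
under the assumption of a driver that already requested its descriptors
(struct mydrv and priv->leds are hypothetical):

	/* Hypothetical: drive four LEDs at once via the new array API. */
	static void mydrv_show_nibble(struct mydrv *priv, unsigned int nibble)
	{
		int values[4];
		int i;

		for (i = 0; i < 4; i++)
			values[i] = (nibble >> i) & 1;
		gpiod_set_array_value(4, priv->leds, values);
	}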
@@ -306,6 +323,14 @@ static inline int gpiod_get_value(const struct gpio_desc *desc)
	WARN_ON(1);
	return 0;
}
static inline int gpiod_get_array_value(unsigned int array_size,
					struct gpio_desc **desc_array,
					int *value_array)
{
	/* GPIO can never have been requested */
	WARN_ON(1);
	return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
	/* GPIO can never have been requested */
@@ -324,6 +349,14 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
	WARN_ON(1);
	return 0;
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
					    struct gpio_desc **desc_array,
					    int *value_array)
{
	/* GPIO can never have been requested */
	WARN_ON(1);
	return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
	/* GPIO can never have been requested */
@@ -343,6 +376,14 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
	WARN_ON(1);
	return 0;
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
						 struct gpio_desc **desc_array,
						 int *value_array)
{
	/* GPIO can never have been requested */
	WARN_ON(1);
	return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
	/* GPIO can never have been requested */
@@ -361,6 +402,14 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
	WARN_ON(1);
	return 0;
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
						     struct gpio_desc **desc_array,
						     int *value_array)
{
	/* GPIO can never have been requested */
	WARN_ON(1);
	return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
						int value)
{
@@ -20,6 +20,131 @@ struct module;

#ifdef CONFIG_GPIOLIB

#ifdef CONFIG_GPIOLIB_IRQCHIP
/**
 * struct gpio_irq_chip - GPIO interrupt controller
 */
struct gpio_irq_chip {
	/**
	 * @chip:
	 *
	 * GPIO IRQ chip implementation, provided by GPIO driver.
	 */
	struct irq_chip *chip;

	/**
	 * @domain:
	 *
	 * Interrupt translation domain; responsible for mapping between GPIO
	 * hwirq number and Linux IRQ number.
	 */
	struct irq_domain *domain;

	/**
	 * @domain_ops:
	 *
	 * Table of interrupt domain operations for this IRQ chip.
	 */
	const struct irq_domain_ops *domain_ops;

	/**
	 * @handler:
	 *
	 * The IRQ handler to use (often a predefined IRQ core function) for
	 * GPIO IRQs, provided by GPIO driver.
	 */
	irq_flow_handler_t handler;

	/**
	 * @default_type:
	 *
	 * Default IRQ triggering type applied during GPIO driver
	 * initialization, provided by GPIO driver.
	 */
	unsigned int default_type;

	/**
	 * @lock_key:
	 *
	 * Per GPIO IRQ chip lockdep class.
	 */
	struct lock_class_key *lock_key;

	/**
	 * @parent_handler:
	 *
	 * The interrupt handler for the GPIO chip's parent interrupts, may be
	 * NULL if the parent interrupts are nested rather than cascaded.
	 */
	irq_flow_handler_t parent_handler;

	/**
	 * @parent_handler_data:
	 *
	 * Data associated, and passed to, the handler for the parent
	 * interrupt.
	 */
	void *parent_handler_data;

	/**
	 * @num_parents:
	 *
	 * The number of interrupt parents of a GPIO chip.
	 */
	unsigned int num_parents;

	/**
	 * @parents:
	 *
	 * A list of interrupt parents of a GPIO chip. This is owned by the
	 * driver, so the core will only reference this list, not modify it.
	 */
	unsigned int *parents;

	/**
	 * @map:
	 *
	 * A list of interrupt parents for each line of a GPIO chip.
	 */
	unsigned int *map;

	/**
	 * @threaded:
	 *
	 * True if the interrupt handling uses nested threads.
	 */
	bool threaded;

	/**
	 * @need_valid_mask:
	 *
	 * If set, the core allocates @valid_mask with all bits set to one.
	 */
	bool need_valid_mask;

	/**
	 * @valid_mask:
	 *
	 * If not %NULL, holds bitmask of GPIOs which are valid to be included
	 * in IRQ domain of the chip.
	 */
	unsigned long *valid_mask;

	/**
	 * @first:
	 *
	 * Required for static IRQ allocation. If set, irq_domain_add_simple()
	 * will allocate and map all IRQs during initialization.
	 */
	unsigned int first;
};

static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
{
	return container_of(chip, struct gpio_irq_chip, chip);
}
#endif

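struct gpio_irq_chip gathers what used to be loose irq_* fields on gpio_chip,
so a driver fills one structure before registration. A sketch of wiring a
cascaded parent interrupt (struct mydrv, mydrv_irq_handler and the field names
on priv are hypothetical):

	/* Hypothetical probe fragment: populate the consolidated irq struct. */
	static void mydrv_setup_irq(struct gpio_chip *gc, struct mydrv *priv)
	{
		struct gpio_irq_chip *girq = &gc->irq;

		girq->chip = &priv->irqchip;           /* driver's struct irq_chip */
		girq->handler = handle_edge_irq;       /* flow handler for GPIO IRQs */
		girq->default_type = IRQ_TYPE_NONE;    /* type set later per line */
		girq->parent_handler = mydrv_irq_handler; /* cascaded parent */
		girq->num_parents = 1;
		girq->parents = &priv->parent_hwirq;
	}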
/**
 * struct gpio_chip - abstract a GPIO controller
 * @label: a functional name for the GPIO device, such as a part
@@ -36,6 +161,8 @@ struct module;
 * @direction_input: configures signal "offset" as input, or returns error
 * @direction_output: configures signal "offset" as output, or returns error
 * @get: returns value for signal "offset", 0=low, 1=high, or negative error
 * @get_multiple: reads values for multiple signals defined by "mask" and
 *	stores them in "bits", returns 0 on success or negative error
 * @set: assigns output value for signal "offset"
 * @set_multiple: assigns output values for multiple signals defined by "mask"
 * @set_config: optional hook for all kinds of settings. Uses the same
@@ -66,9 +193,9 @@ struct module;
 *	registers.
 * @read_reg: reader function for generic GPIO
 * @write_reg: writer function for generic GPIO
 * @pin2mask: some generic GPIO controllers work with the big-endian bits
 *	notation, e.g. in a 8-bits register, GPIO7 is the least significant
 *	bit. This callback assigns the right bit mask.
 * @be_bits: if the generic GPIO has big endian bit order (bit 31 is representing
 *	line 0, bit 30 is line 1 ... bit 0 is line 31) this is set to true by the
 *	generic GPIO core. It is for internal housekeeping only.
 * @reg_dat: data (in) register for generic GPIO
 * @reg_set: output set register (out=high) for generic GPIO
 * @reg_clr: output clear register (out=low) for generic GPIO
@@ -81,23 +208,6 @@ struct module;
 *	safely.
 * @bgpio_dir: shadowed direction register for generic GPIO to clear/set
 *	direction safely.
 * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
 * @irqdomain: Interrupt translation domain; responsible for mapping
 *	between GPIO hwirq number and linux irq number
 * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
 * @irq_handler: the irq handler to use (often a predefined irq core function)
 *	for GPIO IRQs, provided by GPIO driver
 * @irq_default_type: default IRQ triggering type applied during GPIO driver
 *	initialization, provided by GPIO driver
 * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
 *	provided by GPIO driver for chained interrupt (not for nested
 *	interrupts).
 * @irq_nested: True if set the interrupt handling is nested.
 * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
 *	bits set to one
 * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
 *	be included in IRQ domain of the chip
 * @lock_key: per GPIO IRQ chip lockdep class
 *
 * A gpio_chip can help platforms abstract various sources of GPIOs so
 * they can all be accessed through a common programming interface.
@@ -127,6 +237,9 @@ struct gpio_chip {
					unsigned offset, int value);
	int (*get)(struct gpio_chip *chip,
					unsigned offset);
	int (*get_multiple)(struct gpio_chip *chip,
					unsigned long *mask,
					unsigned long *bits);
	void (*set)(struct gpio_chip *chip,
					unsigned offset, int value);
	void (*set_multiple)(struct gpio_chip *chip,
@@ -148,7 +261,7 @@ struct gpio_chip {
#if IS_ENABLED(CONFIG_GPIO_GENERIC)
	unsigned long (*read_reg)(void __iomem *reg);
	void (*write_reg)(void __iomem *reg, unsigned long data);
	unsigned long (*pin2mask)(struct gpio_chip *gc, unsigned int pin);
	bool be_bits;
	void __iomem *reg_dat;
	void __iomem *reg_set;
	void __iomem *reg_clr;
@@ -164,16 +277,14 @@ struct gpio_chip {
	 * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
	 * to handle IRQs for most practical cases.
	 */
	struct irq_chip *irqchip;
	struct irq_domain *irqdomain;
	unsigned int irq_base;
	irq_flow_handler_t irq_handler;
	unsigned int irq_default_type;
	unsigned int irq_chained_parent;
	bool irq_nested;
	bool irq_need_valid_mask;
	unsigned long *irq_valid_mask;
	struct lock_class_key *lock_key;

	/**
	 * @irq:
	 *
	 * Integrates interrupt chip functionality with the GPIO chip. Can be
	 * used to handle IRQs for most practical cases.
	 */
	struct gpio_irq_chip irq;
#endif

#if defined(CONFIG_OF_GPIO)
|
||||
unsigned offset);
|
||||
|
||||
/* add/remove chips */
|
||||
extern int gpiochip_add_data(struct gpio_chip *chip, void *data);
|
||||
extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
|
||||
struct lock_class_key *lock_key);
|
||||
|
||||
/**
|
||||
* gpiochip_add_data() - register a gpio_chip
|
||||
* @chip: the chip to register, with chip->base initialized
|
||||
* @data: driver-private data associated with this chip
|
||||
*
|
||||
* Context: potentially before irqs will work
|
||||
*
|
||||
* When gpiochip_add_data() is called very early during boot, so that GPIOs
|
||||
* can be freely used, the chip->parent device must be registered before
|
||||
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
|
||||
* for GPIOs will fail rudely.
|
||||
*
|
||||
* gpiochip_add_data() must only be called after gpiolib initialization,
|
||||
* ie after core_initcall().
|
||||
*
|
||||
* If chip->base is negative, this requests dynamic assignment of
|
||||
* a range of valid GPIOs.
|
||||
*
|
||||
* Returns:
|
||||
* A negative errno if the chip can't be registered, such as because the
|
||||
* chip->base is invalid or already associated with a different chip.
|
||||
* Otherwise it returns zero as a success code.
|
||||
*/
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
#define gpiochip_add_data(chip, data) ({ \
|
||||
static struct lock_class_key key; \
|
||||
gpiochip_add_data_with_key(chip, data, &key); \
|
||||
})
|
||||
#else
|
||||
#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
|
||||
#endif
|
||||
|
||||
static inline int gpiochip_add(struct gpio_chip *chip)
{
	return gpiochip_add_data(chip, NULL);
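With the lockdep variant, each gpiochip_add_data() call site gets its own
static lock class key, so nested GPIO controllers no longer trigger false
lockdep reports. Registration itself is unchanged for drivers; a sketch
(struct mydrv and its fields are hypothetical):

	/* Hypothetical probe fragment: dynamic GPIO numbering plus
	 * driver-private data retrievable later via gpiochip_get_data(). */
	static int mydrv_register(struct mydrv *priv)
	{
		priv->chip.label = "mydrv";
		priv->chip.parent = priv->dev;
		priv->chip.base = -1;	/* request dynamic assignment */
		priv->chip.ngpio = 8;
		return gpiochip_add_data(&priv->chip, priv);
	}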
@@ -265,6 +410,10 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,

#ifdef CONFIG_GPIOLIB_IRQCHIP

int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
		     irq_hw_number_t hwirq);
void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq);

void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
				  struct irq_chip *irqchip,
				  unsigned int parent_irq,
@@ -279,7 +428,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
			     unsigned int first_irq,
			     irq_flow_handler_t handler,
			     unsigned int type,
			     bool nested,
			     bool threaded,
			     struct lock_class_key *lock_key);

#ifdef CONFIG_LOCKDEP

@@ -11,7 +11,7 @@ enum gpio_lookup_flags {
	GPIO_OPEN_DRAIN = (1 << 1),
	GPIO_OPEN_SOURCE = (1 << 2),
	GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
	GPIO_SLEEP_MAY_LOOSE_VALUE = (1 << 3),
	GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3),
};

/**

include/linux/gpio_mouse.h (deleted)
@@ -1,61 +0,0 @@
/*
 * Driver for simulating a mouse on GPIO lines.
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _GPIO_MOUSE_H
#define _GPIO_MOUSE_H

#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00
#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01

#define GPIO_MOUSE_PIN_UP 0
#define GPIO_MOUSE_PIN_DOWN 1
#define GPIO_MOUSE_PIN_LEFT 2
#define GPIO_MOUSE_PIN_RIGHT 3
#define GPIO_MOUSE_PIN_BLEFT 4
#define GPIO_MOUSE_PIN_BMIDDLE 5
#define GPIO_MOUSE_PIN_BRIGHT 6
#define GPIO_MOUSE_PIN_MAX 7

/**
 * struct gpio_mouse_platform_data
 * @scan_ms: integer in ms specifying the scan periode.
 * @polarity: Pin polarity, active high or low.
 * @up: GPIO line for up value.
 * @down: GPIO line for down value.
 * @left: GPIO line for left value.
 * @right: GPIO line for right value.
 * @bleft: GPIO line for left button.
 * @bmiddle: GPIO line for middle button.
 * @bright: GPIO line for right button.
 *
 * This struct must be added to the platform_device in the board code.
 * It is used by the gpio_mouse driver to setup GPIO lines and to
 * calculate mouse movement.
 */
struct gpio_mouse_platform_data {
	int scan_ms;
	int polarity;

	union {
		struct {
			int up;
			int down;
			int left;
			int right;

			int bleft;
			int bmiddle;
			int bright;
		};
		int pins[GPIO_MOUSE_PIN_MAX];
	};
};

#endif /* _GPIO_MOUSE_H */
@@ -231,6 +231,7 @@ struct hid_sensor_common {
	unsigned usage_id;
	atomic_t data_ready;
	atomic_t user_requested_state;
	atomic_t runtime_pm_enable;
	int poll_interval;
	int raw_hystersis;
	int latency_ms;
@@ -289,6 +289,7 @@ struct hid_item {
#define HID_DG_DEVICEINDEX 0x000d0053
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
#define HID_DG_SCANTIME 0x000d0056
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
@@ -753,6 +754,7 @@ struct hid_driver {
 * @stop: called on remove
 * @open: called by input layer on open
 * @close: called by input layer on close
 * @power: request underlying hardware to enter requested power mode
 * @parse: this method is called only once to parse the device data,
 *	   shouldn't allocate anything to not leak memory
 * @request: send report request to device (e.g. feature report)
@@ -471,9 +471,9 @@ static inline void hmm_devmem_page_set_drvdata(struct page *page,
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(struct page *page)
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	unsigned long *drvdata = (unsigned long *)&page->pgmap;
	const unsigned long *drvdata = (const unsigned long *)&page->pgmap;

	return drvdata[1];
}
@@ -157,7 +157,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_free(struct host1x_syncpt *sp);

@@ -719,6 +719,10 @@ struct vmbus_channel {

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/* Channel callback's invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
@@ -828,6 +832,11 @@ struct vmbus_channel {
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1089,6 +1098,7 @@ struct hv_device {
	struct device device;

	struct vmbus_channel *channel;
	struct kset *channels_kset;
};

@@ -12,8 +12,6 @@

/**
 * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio
 * @sda_pin: GPIO pin ID to use for SDA
 * @scl_pin: GPIO pin ID to use for SCL
 * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz
 * @timeout: clock stretching timeout in jiffies. If the slave keeps
 *	SCL low for longer than this, the transfer will time out.
@@ -26,8 +24,6 @@
 * @scl_is_output_only: SCL output drivers cannot be turned off.
 */
struct i2c_gpio_platform_data {
	unsigned int sda_pin;
	unsigned int scl_pin;
	int udelay;
	int timeout;
	unsigned int sda_is_open_drain:1;
@@ -42,7 +42,6 @@
 * properly set.
 */
struct i2c_smbus_alert_setup {
	unsigned int alert_edge_triggered:1;
	int irq;
};

@@ -50,4 +49,13 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
					 struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);

#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
int of_i2c_setup_smbus_alert(struct i2c_adapter *adap);
#else
static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
{
	return 0;
}
#endif

#endif /* _LINUX_I2C_SMBUS_H */

@@ -304,6 +304,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
 * @type: chip type, to initialize i2c_client.name
 * @flags: to initialize i2c_client.flags
 * @addr: stored in i2c_client.addr
 * @dev_name: Overrides the default <busnr>-<addr> dev_name if set
 * @platform_data: stored in i2c_client.dev.platform_data
 * @archdata: copied into i2c_client.dev.archdata
 * @of_node: pointer to OpenFirmware device node
@@ -328,6 +329,7 @@ struct i2c_board_info {
	char		type[I2C_NAME_SIZE];
	unsigned short	flags;
	unsigned short	addr;
	const char	*dev_name;
	void		*platform_data;
	struct dev_archdata	*archdata;
	struct device_node *of_node;
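With @dev_name a platform can pin a stable device name in place of the generated <busnr>-<addr> form. A hedged example of a static board table entry (chip and name chosen for illustration):

    static struct i2c_board_info my_rtc_info __initdata = {
            I2C_BOARD_INFO("pcf8563", 0x51),
            .dev_name = "rtc0",     /* instead of e.g. "0-0051" */
    };

It would be registered as usual, e.g. with i2c_register_board_info(0, &my_rtc_info, 1).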
@@ -2445,6 +2445,7 @@ enum ieee80211_sa_query_action {
#define WLAN_OUI_TYPE_MICROSOFT_WPA	1
#define WLAN_OUI_TYPE_MICROSOFT_WMM	2
#define WLAN_OUI_TYPE_MICROSOFT_WPS	4
#define WLAN_OUI_TYPE_MICROSOFT_TPC	8

/*
 * WMM/802.11e Tspec Element
@@ -31,7 +31,7 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
	return (struct arphdr *)skb_network_header(skb);
}

static inline int arp_hdr_len(struct net_device *dev)
static inline unsigned int arp_hdr_len(const struct net_device *dev)
{
	switch (dev->type) {
#if IS_ENABLED(CONFIG_FIREWIRE_NET)
@@ -49,6 +49,7 @@ struct br_ip_list {
#define BR_MULTICAST_TO_UNICAST	BIT(12)
#define BR_VLAN_TUNNEL		BIT(13)
#define BR_BCAST_FLOOD		BIT(14)
#define BR_NEIGH_SUPPRESS	BIT(15)

#define BR_DEFAULT_AGEING_TIME	(300 * HZ)

@@ -63,6 +64,7 @@ int br_multicast_list_adjacent(struct net_device *dev,
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
					     struct list_head *br_ip_list)
@@ -83,6 +85,10 @@ static inline bool br_multicast_enabled(const struct net_device *dev)
{
	return false;
}
static inline bool br_multicast_router(const struct net_device *dev)
{
	return false;
}
#endif

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
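The new helper (with a false-returning stub when the bridge is compiled out) lets, for example, a switchdev driver key hardware flooding decisions off the bridge's multicast-router state. A hedged sketch (the driver hook is hypothetical):

    /* Mirror multicast toward the CPU port only while the bridge
     * is acting as a multicast router. */
    if (br_multicast_router(br_dev))
            my_sw_set_mrouter(sw, port, true);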
@@ -83,6 +83,7 @@ struct frad_local

	/* fields that are used by the Sangoma SDLA cards */
	struct timer_list timer;
	struct net_device *dev;
	int type;		/* adapter type */
	int state;		/* state of the S502/8 control latch */
	int buffer;		/* current buffer for S508 firmware */
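The added net_device back-pointer follows the pattern used throughout this series: with timer_setup() the callback receives only the timer_list, recovers its container with from_timer(), and must reach everything else through it. A hedged sketch (the callback and poll routine names are illustrative):

    static void sdla_poll_timer(struct timer_list *t)
    {
            struct frad_local *flp = from_timer(flp, t, timer);

            my_sdla_poll(flp->dev);   /* reach the device via the new field */
    }

    /* at init time, replacing init_timer()/setup_timer(): */
    timer_setup(&flp->timer, sdla_poll_timer, 0);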
@@ -11,13 +11,6 @@
#include <linux/u64_stats_sync.h>

struct macvlan_port;
struct macvtap_queue;

/*
 * Maximum times a macvtap device can be opened. This can be used to
 * configure the number of receive queue, e.g. for multiqueue virtio.
 */
#define MAX_TAP_QUEUES	256

#define MACVLAN_MC_FILTER_BITS	8
#define MACVLAN_MC_FILTER_SZ	(1 << MACVLAN_MC_FILTER_BITS)
@@ -36,14 +29,6 @@ struct macvlan_dev {
	netdev_features_t	set_features;
	enum macvlan_mode	mode;
	u16			flags;
	/* This array tracks active taps. */
	struct tap_queue	__rcu *taps[MAX_TAP_QUEUES];
	/* This list tracks all taps (both enabled and disabled) */
	struct list_head	queue_list;
	int			numvtaps;
	int			numqueues;
	netdev_features_t	tap_features;
	int			minor;
	int			nest_level;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll		*netpoll;
@@ -73,7 +58,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
extern void macvlan_common_setup(struct net_device *dev);

extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
				  struct nlattr *tb[], struct nlattr *data[]);
				  struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack);

extern void macvlan_count_rx(const struct macvlan_dev *vlan,
			     unsigned int len, bool success,
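The extra parameter threads netlink extended-ack context through to wrappers such as macvtap. A hedged sketch of a caller that also attaches a failure message (the wrapper itself is hypothetical):

    static int my_newlink(struct net *src_net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[],
                          struct netlink_ext_ack *extack)
    {
            int err = macvlan_common_newlink(src_net, dev, tb, data, extack);

            if (err)
                    NL_SET_ERR_MSG(extack, "macvlan link setup failed");
            return err;
    }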
@@ -11,5 +11,5 @@

#include <uapi/linux/if_phonet.h>

extern struct header_ops phonet_header_ops;
extern const struct header_ops phonet_header_ops;
#endif
@@ -23,6 +23,10 @@ static inline struct skb_array *tap_get_skb_array(struct file *f)
#include <net/sock.h>
#include <linux/skb_array.h>

/*
 * Maximum times a tap device can be opened. This can be used to
 * configure the number of receive queue, e.g. for multiqueue virtio.
 */
#define MAX_TAP_QUEUES	256

struct tap_queue;
@@ -130,29 +130,40 @@ struct st_sensor_das {
	u8 mask;
};

/**
 * struct st_sensor_int_drdy - ST sensor device drdy line parameters
 * @addr: address of INT drdy register.
 * @mask: mask to enable drdy line.
 * @addr_od: address to enable/disable Open Drain on the INT line.
 * @mask_od: mask to enable/disable Open Drain on the INT line.
 */
struct st_sensor_int_drdy {
	u8 addr;
	u8 mask;
	u8 addr_od;
	u8 mask_od;
};

/**
 * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
 * @addr: address of the register.
 * @mask_int1: mask to enable/disable IRQ on INT1 pin.
 * @mask_int2: mask to enable/disable IRQ on INT2 pin.
 * struct int1 - data-ready configuration register for INT1 pin.
 * struct int2 - data-ready configuration register for INT2 pin.
 * @addr_ihl: address to enable/disable active low on the INT lines.
 * @mask_ihl: mask to enable/disable active low on the INT lines.
 * @addr_od: address to enable/disable Open Drain on the INT lines.
 * @mask_od: mask to enable/disable Open Drain on the INT lines.
 * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
 * struct stat_drdy - status register of DRDY (data ready) interrupt.
 * struct ig1 - represents the Interrupt Generator 1 of sensors.
 * @en_addr: address of the enable ig1 register.
 * @en_mask: mask to write the on/off value for enable.
 */
struct st_sensor_data_ready_irq {
	u8 addr;
	u8 mask_int1;
	u8 mask_int2;
	struct st_sensor_int_drdy int1;
	struct st_sensor_int_drdy int2;
	u8 addr_ihl;
	u8 mask_ihl;
	u8 addr_od;
	u8 mask_od;
	u8 addr_stat_drdy;
	struct {
		u8 addr;
		u8 mask;
	} stat_drdy;
	struct {
		u8 en_addr;
		u8 en_mask;
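Per-pin open-drain control now lives in the nested st_sensor_int_drdy members. A hedged settings fragment for the drdy_irq field (all register addresses and masks invented for illustration):

    .drdy_irq = {
            .addr      = 0x22,
            .mask_int1 = 0x01,
            .mask_int2 = 0x08,
            .int1 = { .addr_od = 0x22, .mask_od = 0x40 },
            .int2 = { .addr_od = 0x22, .mask_od = 0x40 },
            .stat_drdy = { .addr = 0x27, .mask = 0x07 },
    },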
@@ -365,12 +365,9 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS		4

struct iio_trigger; /* forward declaration */
struct iio_dev;

/**
 * struct iio_info - constant information about device
 * @driver_module:	module structure used to ensure correct
 *			ownership of chrdevs etc
 * @event_attrs:	event control attributes
 * @attrs:		general purpose device attributes
 * @read_raw:		function to request a value from the device.
@@ -425,7 +422,6 @@ struct iio_dev;
 *			were flushed and there was an error.
 **/
struct iio_info {
	struct module			*driver_module;
	const struct attribute_group	*event_attrs;
	const struct attribute_group	*attrs;

@@ -518,6 +514,7 @@ struct iio_buffer_setup_ops {
/**
 * struct iio_dev - industrial I/O device
 * @id:			[INTERN] used to identify device internally
 * @driver_module:	[INTERN] used to make it harder to undercut users
 * @modes:		[DRIVER] operating modes supported by device
 * @currentmode:	[DRIVER] current operating mode
 * @dev:		[DRIVER] device structure, should be assigned a parent
@@ -558,6 +555,7 @@ struct iio_buffer_setup_ops {
 */
struct iio_dev {
	int				id;
	struct module			*driver_module;

	int				modes;
	int				currentmode;
@@ -604,9 +602,34 @@ struct iio_dev {

const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
int iio_device_register(struct iio_dev *indio_dev);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:	Device structure filled by the device driver
 **/
#define iio_device_register(iio_dev) \
	__iio_device_register((iio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register. The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_dev registered with this function needs to be unregistered
 * separately, devm_iio_device_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE);
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
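Existing drivers keep calling iio_device_register()/devm_iio_device_register() unchanged; the macros now capture THIS_MODULE at each call site so the core can take the right module reference without a driver_module assignment in iio_info. A hedged probe sketch (the ADC names are invented):

    static int my_adc_probe(struct platform_device *pdev)
    {
            struct iio_dev *indio_dev;

            indio_dev = devm_iio_device_alloc(&pdev->dev,
                                              sizeof(struct my_adc));
            if (!indio_dev)
                    return -ENOMEM;

            indio_dev->info = &my_adc_info;  /* no .driver_module needed now */

            /* expands to __devm_iio_device_register(..., THIS_MODULE) */
            return devm_iio_device_register(&pdev->dev, indio_dev);
    }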
@@ -60,7 +60,7 @@ void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
				  const char *name,
				  struct config_item_type *type)
				  const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
	config_group_init_type_name(&d->group, name, type);
@@ -60,7 +60,7 @@ void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
				  const char *name,
				  struct config_item_type *type)
				  const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
	config_group_init_type_name(&t->group, name, type);
@@ -23,7 +23,6 @@ struct iio_trigger;

/**
 * struct iio_trigger_ops - operations structure for an iio_trigger.
 * @owner:		used to monitor usage count of the trigger.
 * @set_trigger_state:	switch on/off the trigger on demand
 * @try_reenable:	function to reenable the trigger when the
 *			use count is zero (may be NULL)
@@ -34,7 +33,6 @@ struct iio_trigger;
 * instances of a given device.
 **/
struct iio_trigger_ops {
	struct module *owner;
	int (*set_trigger_state)(struct iio_trigger *trig, bool state);
	int (*try_reenable)(struct iio_trigger *trig);
	int (*validate_device)(struct iio_trigger *trig,
@@ -62,6 +60,7 @@ struct iio_trigger_ops {
 **/
struct iio_trigger {
	const struct iio_trigger_ops	*ops;
	struct module			*owner;
	int				id;
	const char			*name;
	struct device			dev;
@@ -87,14 +86,14 @@ static inline struct iio_trigger *to_iio_trigger(struct device *d)

static inline void iio_trigger_put(struct iio_trigger *trig)
{
	module_put(trig->ops->owner);
	module_put(trig->owner);
	put_device(&trig->dev);
}

static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
	get_device(&trig->dev);
	__module_get(trig->ops->owner);
	__module_get(trig->owner);

	return trig;
}
@@ -127,10 +126,16 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
 * iio_trigger_register() - register a trigger with the IIO core
 * @trig_info:	trigger to be registered
 **/
int iio_trigger_register(struct iio_trigger *trig_info);
#define iio_trigger_register(trig_info) \
	__iio_trigger_register((trig_info), THIS_MODULE)
int __iio_trigger_register(struct iio_trigger *trig_info,
			   struct module *this_mod);

int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info);
#define devm_iio_trigger_register(dev, trig_info) \
	__devm_iio_trigger_register((dev), (trig_info), THIS_MODULE)
int __devm_iio_trigger_register(struct device *dev,
				struct iio_trigger *trig_info,
				struct module *this_mod);

/**
 * iio_trigger_unregister() - unregister a trigger from the core
@@ -155,6 +155,7 @@ struct in_ifaddr {
struct in_validator_info {
	__be32			ivi_addr;
	struct in_device	*ivi_dev;
	struct netlink_ext_ack	*extack;
};

int register_inetaddr_notifier(struct notifier_block *nb);
@@ -180,6 +181,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst,
			 __be32 local, int scope);
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
				    __be32 mask);
struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr);
static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
{
	return !((addr^ifa->ifa_address)&ifa->ifa_mask);
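With extack in the validator info, an address validator can veto an address and attach a human-readable reason. A hedged sketch, assuming the companion inetaddr validator notifier chain and a hypothetical policy check:

    static int my_inetaddr_validate(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
            struct in_validator_info *ivi = ptr;

            if (my_addr_forbidden(ivi->ivi_addr)) {  /* hypothetical policy */
                    NL_SET_ERR_MSG(ivi->extack,
                                   "address rejected by local policy");
                    return notifier_from_errno(-EACCES);
            }
            return NOTIFY_DONE;
    }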
@@ -40,7 +40,7 @@

/* These are for everybody (although not all archs will actually
   discard it in modules) */
#define __init		__section(.init.text) __cold __inittrace __latent_entropy
#define __init		__section(.init.text) __cold __latent_entropy
#define __initdata	__section(.init.data)
#define __initconst	__section(.init.rodata)
#define __exitdata	__section(.exit.data)
@@ -69,10 +69,8 @@

#ifdef MODULE
#define __exitused
#define __inittrace notrace
#else
#define __exitused __used
#define __inittrace
#endif

#define __exit		__section(.exit.text) __exitused __cold notrace
@@ -105,7 +105,6 @@ extern struct group_info init_groups;
	.numbers = { {						\
		.nr = 0,					\
		.ns = &init_pid_ns,				\
		.pid_chain = { .next = NULL, .pprev = NULL },	\
	}, }							\
}
@@ -212,6 +212,7 @@
#define DMA_FSTS_IQE (1 << 4)
#define DMA_FSTS_ICE (1 << 5)
#define DMA_FSTS_ITE (1 << 6)
#define DMA_FSTS_PRO (1 << 7)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
@@ -594,21 +594,6 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
@@ -16,27 +16,33 @@ struct vm_fault;
 */
#define IOMAP_HOLE	0x01	/* no blocks allocated, need allocation */
#define IOMAP_DELALLOC	0x02	/* delayed allocation blocks */
#define IOMAP_MAPPED	0x03	/* blocks allocated @blkno */
#define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */
#define IOMAP_MAPPED	0x03	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN	0x04	/* blocks allocated at @addr in unwritten state */

/*
 * Flags for all iomap mappings:
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit them to persistent storage.
 */
#define IOMAP_F_NEW	0x01	/* blocks have been newly allocated */
#define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
#define IOMAP_F_BOUNDARY	0x02	/* mapping ends at metadata boundary */
#define IOMAP_F_DIRTY		0x04	/* uncommitted metadata */

/*
 * Flags that only need to be reported for IOMAP_REPORT requests:
 */
#define IOMAP_F_MERGED	0x10	/* contains multiple blocks/extents */
#define IOMAP_F_SHARED	0x20	/* block shared with another file */
#define IOMAP_F_MERGED		0x10	/* contains multiple blocks/extents */
#define IOMAP_F_SHARED		0x20	/* block shared with another file */
#define IOMAP_F_DATA_INLINE	0x40	/* data inline in the inode */

/*
 * Magic value for blkno:
 * Magic value for addr:
 */
#define IOMAP_NULL_BLOCK -1LL	/* blkno is not valid */
#define IOMAP_NULL_ADDR -1ULL	/* addr is not valid */

struct iomap {
	sector_t		blkno;	/* 1st sector of mapping, 512b units */
	u64			addr;	/* disk offset of mapping, bytes */
	loff_t			offset;	/* file offset of mapping, bytes */
	u64			length;	/* length of mapping, bytes */
	u16			type;	/* type of mapping */
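Filesystems converting to the new field switch from 512-byte-sector to byte units. A hedged before/after fragment (variable names invented, not from any particular filesystem):

    /* before: iomap->blkno = sector;   (512-byte units) */
    iomap->addr = (u64)sector << 9;     /* bytes */

    /* and the "no mapping" sentinel changes accordingly: */
    iomap->addr = IOMAP_NULL_ADDR;      /* was IOMAP_NULL_BLOCK */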
@@ -42,18 +42,21 @@
 */
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
({ \
	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
	might_sleep_if(sleep_us); \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = op(addr); \
		if (cond) \
			break; \
		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = op(addr); \
			break; \
		} \
		if (sleep_us) \
			usleep_range((sleep_us >> 2) + 1, sleep_us); \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
@@ -77,17 +80,20 @@
 */
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
({ \
	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
	u64 __timeout_us = (timeout_us); \
	unsigned long __delay_us = (delay_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	for (;;) { \
		(val) = op(addr); \
		if (cond) \
			break; \
		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = op(addr); \
			break; \
		} \
		if (delay_us) \
			udelay(delay_us); \
		if (__delay_us) \
			udelay(__delay_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
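The rewritten macros evaluate sleep_us/delay_us and timeout_us exactly once into local __-prefixed variables, so arguments with side effects are no longer expanded on every loop iteration. Typical use is unchanged; a hedged example polling a hypothetical status register (MY_* names are assumptions):

    #include <linux/iopoll.h>

    u32 val;
    int err;

    /* Poll until the READY bit is set, sleeping ~10us between reads,
     * giving up after 100ms; err is 0 or -ETIMEDOUT. */
    err = readx_poll_timeout(readl, base + MY_STATUS_REG, val,
                             val & MY_STATUS_READY, 10, 100 * 1000);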
@@ -70,10 +70,12 @@ struct iova_fq {
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached32_node; /* Save last alloced node */
	struct rb_node	*cached_node;	/* Save last alloced node */
	struct rb_node	*cached32_node; /* Save last 32-bit alloced node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
@@ -148,12 +150,12 @@ void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn);
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn, unsigned long pfn_32bit);
		      unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -210,7 +212,8 @@ static inline void queue_iova(struct iova_domain *iovad,

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn)
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}
@@ -229,8 +232,7 @@ static inline void copy_reserved_iova(struct iova_domain *from,

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn,
				    unsigned long pfn_32bit)
				    unsigned long start_pfn)
{
}
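The new flush_rcache argument lets a caller decide whether a failed allocation should purge the per-CPU IOVA range caches and retry, which is usually only worth doing as a last resort. A hedged sketch of such a retry pattern (variables assumed from surrounding DMA-mapping code):

    unsigned long pfn;

    /* fast path: leave the per-CPU rcaches alone */
    pfn = alloc_iova_fast(iovad, nr_pages, limit_pfn, false);
    if (!pfn)
            /* slow path: flush cached ranges back to the tree, retry */
            pfn = alloc_iova_fast(iovad, nr_pages, limit_pfn, true);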
Some files were not shown because too many files have changed in this diff