Merge v5.3-rc1 into drm-misc-next
Noralf needs some SPI patches in 5.3 to merge some work on tinydrm. Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
This commit is contained in:
@@ -10,6 +10,7 @@
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ioport.h> /* for struct resource */
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/resource_ext.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/property.h>
|
||||
@@ -314,10 +315,19 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
|
||||
void acpi_set_irq_model(enum acpi_irq_model_id model,
|
||||
struct fwnode_handle *fwnode);
|
||||
|
||||
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
|
||||
unsigned int size,
|
||||
struct fwnode_handle *fwnode,
|
||||
const struct irq_domain_ops *ops,
|
||||
void *host_data);
|
||||
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
|
||||
#else
|
||||
#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
|
||||
static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
* This function undoes the effect of one call to acpi_register_gsi().
|
||||
@@ -367,6 +377,7 @@ extern acpi_status wmi_install_notify_handler(const char *guid,
|
||||
extern acpi_status wmi_remove_notify_handler(const char *guid);
|
||||
extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
|
||||
extern bool wmi_has_guid(const char *guid);
|
||||
extern char *wmi_get_acpi_device_uid(const char *guid);
|
||||
|
||||
#endif /* CONFIG_ACPI_WMI */
|
||||
|
||||
@@ -913,31 +924,21 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
|
||||
int acpi_dev_suspend_late(struct device *dev);
|
||||
int acpi_subsys_prepare(struct device *dev);
|
||||
void acpi_subsys_complete(struct device *dev);
|
||||
int acpi_subsys_suspend_late(struct device *dev);
|
||||
int acpi_subsys_suspend_noirq(struct device *dev);
|
||||
int acpi_subsys_resume_noirq(struct device *dev);
|
||||
int acpi_subsys_resume_early(struct device *dev);
|
||||
int acpi_subsys_suspend(struct device *dev);
|
||||
int acpi_subsys_freeze(struct device *dev);
|
||||
int acpi_subsys_freeze_late(struct device *dev);
|
||||
int acpi_subsys_freeze_noirq(struct device *dev);
|
||||
int acpi_subsys_thaw_noirq(struct device *dev);
|
||||
int acpi_subsys_poweroff(struct device *dev);
|
||||
#else
|
||||
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
|
||||
static inline void acpi_subsys_complete(struct device *dev) {}
|
||||
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
|
||||
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
@@ -1303,6 +1304,7 @@ static inline int lpit_read_residency_count_address(u64 *address)
|
||||
#ifdef CONFIG_ACPI_PPTT
|
||||
int find_acpi_cpu_topology(unsigned int cpu, int level);
|
||||
int find_acpi_cpu_topology_package(unsigned int cpu);
|
||||
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
|
||||
int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
|
||||
#else
|
||||
static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
|
||||
@@ -1313,6 +1315,10 @@ static inline int find_acpi_cpu_topology_package(unsigned int cpu)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
|
||||
{
|
||||
return -EINVAL;
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* ACPI helpers for DMA request / controller
|
||||
*
|
||||
@@ -5,10 +6,6 @@
|
||||
*
|
||||
* Copyright (C) 2013, Intel Corporation
|
||||
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_ACPI_DMA_H
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* linux/include/amba/bus.h
|
||||
*
|
||||
@@ -6,10 +7,6 @@
|
||||
* region or that is derived from a PrimeCell.
|
||||
*
|
||||
* Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef ASMARM_AMBA_H
|
||||
#define ASMARM_AMBA_H
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/* include/linux/amba/pl080.h
|
||||
*
|
||||
* Copyright 2008 Openmoko, Inc.
|
||||
@@ -6,10 +7,6 @@
|
||||
* Ben Dooks <ben@simtec.co.uk>
|
||||
*
|
||||
* ARM PrimeCell PL080 DMA controller
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
/* Note, there are some Samsung updates to this controller block which
|
||||
|
@@ -1,13 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
|
||||
*
|
||||
* Copyright (C) 2005 ARM Ltd
|
||||
* Copyright (C) 2010 ST-Ericsson SA
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* pl08x information required by platform code
|
||||
*
|
||||
* Please credit ARM.com
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/* linux/amba/pl093.h
|
||||
*
|
||||
* Copyright (c) 2008 Simtec Electronics
|
||||
@@ -6,10 +7,6 @@
|
||||
*
|
||||
* AMBA PL093 SSMC (synchronous static memory controller)
|
||||
* See DDI0236.pdf (r0p4) for more details
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */
|
||||
|
@@ -1,18 +1,7 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
|
||||
* Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License (version 2) as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef LINUX_APPLE_GMUX_H
|
||||
|
@@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_scale);
|
||||
|
||||
struct sched_domain;
|
||||
static inline
|
||||
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
|
||||
unsigned long topology_get_cpu_scale(int cpu)
|
||||
{
|
||||
return per_cpu(cpu_scale, cpu);
|
||||
}
|
||||
|
@@ -182,6 +182,9 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
|
||||
}
|
||||
|
||||
extern u32 audit_enabled;
|
||||
|
||||
extern int audit_signal_info(int sig, struct task_struct *t);
|
||||
|
||||
#else /* CONFIG_AUDIT */
|
||||
static inline __printf(4, 5)
|
||||
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
|
||||
@@ -235,6 +238,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
|
||||
}
|
||||
|
||||
#define audit_enabled AUDIT_OFF
|
||||
|
||||
static inline int audit_signal_info(int sig, struct task_struct *t)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_AUDIT */
|
||||
|
||||
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
|
||||
|
@@ -61,12 +61,14 @@ enum virtchnl_status_code {
|
||||
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
|
||||
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
|
||||
|
||||
#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
|
||||
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
|
||||
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
|
||||
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
|
||||
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
|
||||
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
|
||||
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
|
||||
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
|
||||
|
||||
enum virtchnl_link_speed {
|
||||
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
|
||||
@@ -76,6 +78,8 @@ enum virtchnl_link_speed {
|
||||
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
|
||||
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
|
||||
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
|
||||
VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
|
||||
VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
|
||||
};
|
||||
|
||||
/* for hsplit_0 field of Rx HMC context */
|
||||
|
@@ -203,7 +203,6 @@ struct backing_dev_info {
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct dentry *debug_dir;
|
||||
struct dentry *debug_stats;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
@@ -48,6 +48,7 @@ extern spinlock_t bdi_lock;
|
||||
extern struct list_head bdi_list;
|
||||
|
||||
extern struct workqueue_struct *bdi_wq;
|
||||
extern struct workqueue_struct *bdi_async_bio_wq;
|
||||
|
||||
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
|
||||
{
|
||||
|
@@ -64,6 +64,10 @@ extern struct page *balloon_page_alloc(void);
|
||||
extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
|
||||
struct page *page);
|
||||
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
|
||||
extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
|
||||
struct list_head *pages);
|
||||
extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
|
||||
struct list_head *pages, size_t n_req_pages);
|
||||
|
||||
static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
|
||||
{
|
||||
|
@@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline bool bio_full(struct bio *bio)
|
||||
/**
|
||||
* bio_full - check if the bio is full
|
||||
* @bio: bio to check
|
||||
* @len: length of one segment to be added
|
||||
*
|
||||
* Return true if @bio is full and one segment with @len bytes can't be
|
||||
* added to the bio, otherwise return false
|
||||
*/
|
||||
static inline bool bio_full(struct bio *bio, unsigned len)
|
||||
{
|
||||
return bio->bi_vcnt >= bio->bi_max_vecs;
|
||||
if (bio->bi_vcnt >= bio->bi_max_vecs)
|
||||
return true;
|
||||
|
||||
if (bio->bi_iter.bi_size > UINT_MAX - len)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool bio_next_segment(const struct bio *bio,
|
||||
@@ -408,7 +422,6 @@ static inline void bio_wouldblock_error(struct bio *bio)
|
||||
}
|
||||
|
||||
struct request_queue;
|
||||
extern int bio_phys_segments(struct request_queue *, struct bio *);
|
||||
|
||||
extern int submit_bio_wait(struct bio *bio);
|
||||
extern void bio_advance(struct bio *, unsigned);
|
||||
@@ -423,10 +436,11 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
|
||||
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
|
||||
unsigned int, unsigned int);
|
||||
bool __bio_try_merge_page(struct bio *bio, struct page *page,
|
||||
unsigned int len, unsigned int off, bool same_page);
|
||||
unsigned int len, unsigned int off, bool *same_page);
|
||||
void __bio_add_page(struct bio *bio, struct page *page,
|
||||
unsigned int len, unsigned int off);
|
||||
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
|
||||
void bio_release_pages(struct bio *bio, bool mark_dirty);
|
||||
struct rq_map_data;
|
||||
extern struct bio *bio_map_user_iov(struct request_queue *,
|
||||
struct iov_iter *, gfp_t);
|
||||
@@ -444,17 +458,6 @@ void generic_end_io_acct(struct request_queue *q, int op,
|
||||
struct hd_struct *part,
|
||||
unsigned long start_time);
|
||||
|
||||
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
|
||||
#endif
|
||||
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||
extern void bio_flush_dcache_pages(struct bio *bi);
|
||||
#else
|
||||
static inline void bio_flush_dcache_pages(struct bio *bi)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
|
||||
struct bio *src, struct bvec_iter *src_iter);
|
||||
extern void bio_copy_data(struct bio *dst, struct bio *src);
|
||||
|
@@ -1,13 +1,15 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LINUX_BITS_H
|
||||
#define __LINUX_BITS_H
|
||||
|
||||
#include <linux/const.h>
|
||||
#include <asm/bitsperlong.h>
|
||||
|
||||
#define BIT(nr) (1UL << (nr))
|
||||
#define BIT_ULL(nr) (1ULL << (nr))
|
||||
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
|
||||
#define BIT(nr) (UL(1) << (nr))
|
||||
#define BIT_ULL(nr) (ULL(1) << (nr))
|
||||
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
|
||||
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
|
||||
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
|
||||
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
|
||||
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
|
||||
#define BITS_PER_BYTE 8
|
||||
|
||||
@@ -17,10 +19,11 @@
|
||||
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
|
||||
*/
|
||||
#define GENMASK(h, l) \
|
||||
(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
|
||||
(((~UL(0)) - (UL(1) << (l)) + 1) & \
|
||||
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
|
||||
|
||||
#define GENMASK_ULL(h, l) \
|
||||
(((~0ULL) - (1ULL << (l)) + 1) & \
|
||||
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
|
||||
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
|
||||
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
|
||||
|
||||
#endif /* __LINUX_BITS_H */
|
||||
|
@@ -63,19 +63,17 @@ struct blkcg {
|
||||
|
||||
/*
|
||||
* blkg_[rw]stat->aux_cnt is excluded for local stats but included for
|
||||
* recursive. Used to carry stats of dead children, and, for blkg_rwstat,
|
||||
* to carry result values from read and sum operations.
|
||||
* recursive. Used to carry stats of dead children.
|
||||
*/
|
||||
struct blkg_stat {
|
||||
struct percpu_counter cpu_cnt;
|
||||
atomic64_t aux_cnt;
|
||||
};
|
||||
|
||||
struct blkg_rwstat {
|
||||
struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
|
||||
atomic64_t aux_cnt[BLKG_RWSTAT_NR];
|
||||
};
|
||||
|
||||
struct blkg_rwstat_sample {
|
||||
u64 cnt[BLKG_RWSTAT_NR];
|
||||
};
|
||||
|
||||
/*
|
||||
* A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
|
||||
* request_queue (q). This is used by blkcg policies which need to track
|
||||
@@ -134,13 +132,17 @@ struct blkcg_gq {
|
||||
|
||||
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
|
||||
|
||||
struct rcu_head rcu_head;
|
||||
spinlock_t async_bio_lock;
|
||||
struct bio_list async_bios;
|
||||
struct work_struct async_bio_work;
|
||||
|
||||
atomic_t use_delay;
|
||||
atomic64_t delay_nsec;
|
||||
atomic64_t delay_start;
|
||||
u64 last_delay;
|
||||
int last_use;
|
||||
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
|
||||
@@ -198,6 +200,13 @@ int blkcg_activate_policy(struct request_queue *q,
|
||||
void blkcg_deactivate_policy(struct request_queue *q,
|
||||
const struct blkcg_policy *pol);
|
||||
|
||||
static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
|
||||
unsigned int idx)
|
||||
{
|
||||
return atomic64_read(&rwstat->aux_cnt[idx]) +
|
||||
percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
|
||||
}
|
||||
|
||||
const char *blkg_dev_name(struct blkcg_gq *blkg);
|
||||
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
|
||||
u64 (*prfill)(struct seq_file *,
|
||||
@@ -206,8 +215,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
|
||||
bool show_total);
|
||||
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
|
||||
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
const struct blkg_rwstat *rwstat);
|
||||
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
|
||||
const struct blkg_rwstat_sample *rwstat);
|
||||
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
int off);
|
||||
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
|
||||
@@ -215,10 +223,8 @@ int blkg_print_stat_ios(struct seq_file *sf, void *v);
|
||||
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
|
||||
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
|
||||
|
||||
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
|
||||
struct blkcg_policy *pol, int off);
|
||||
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
|
||||
struct blkcg_policy *pol, int off);
|
||||
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
|
||||
int off, struct blkg_rwstat_sample *sum);
|
||||
|
||||
struct blkg_conf_ctx {
|
||||
struct gendisk *disk;
|
||||
@@ -569,69 +575,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
|
||||
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
|
||||
(p_blkg)->q, false)))
|
||||
|
||||
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
atomic64_set(&stat->aux_cnt, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blkg_stat_exit(struct blkg_stat *stat)
|
||||
{
|
||||
percpu_counter_destroy(&stat->cpu_cnt);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_stat_add - add a value to a blkg_stat
|
||||
* @stat: target blkg_stat
|
||||
* @val: value to add
|
||||
*
|
||||
* Add @val to @stat. The caller must ensure that IRQ on the same CPU
|
||||
* don't re-enter this function for the same counter.
|
||||
*/
|
||||
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
|
||||
{
|
||||
percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_stat_read - read the current value of a blkg_stat
|
||||
* @stat: blkg_stat to read
|
||||
*/
|
||||
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
|
||||
{
|
||||
return percpu_counter_sum_positive(&stat->cpu_cnt);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_stat_reset - reset a blkg_stat
|
||||
* @stat: blkg_stat to reset
|
||||
*/
|
||||
static inline void blkg_stat_reset(struct blkg_stat *stat)
|
||||
{
|
||||
percpu_counter_set(&stat->cpu_cnt, 0);
|
||||
atomic64_set(&stat->aux_cnt, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_stat_add_aux - add a blkg_stat into another's aux count
|
||||
* @to: the destination blkg_stat
|
||||
* @from: the source
|
||||
*
|
||||
* Add @from's count including the aux one to @to's aux count.
|
||||
*/
|
||||
static inline void blkg_stat_add_aux(struct blkg_stat *to,
|
||||
struct blkg_stat *from)
|
||||
{
|
||||
atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
|
||||
&to->aux_cnt);
|
||||
}
|
||||
|
||||
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
|
||||
{
|
||||
int i, ret;
|
||||
@@ -693,15 +636,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
|
||||
*
|
||||
* Read the current snapshot of @rwstat and return it in the aux counts.
|
||||
*/
|
||||
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
|
||||
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
|
||||
struct blkg_rwstat_sample *result)
|
||||
{
|
||||
struct blkg_rwstat result;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BLKG_RWSTAT_NR; i++)
|
||||
atomic64_set(&result.aux_cnt[i],
|
||||
percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
|
||||
return result;
|
||||
result->cnt[i] =
|
||||
percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -714,10 +656,10 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
|
||||
*/
|
||||
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
|
||||
{
|
||||
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
|
||||
struct blkg_rwstat_sample tmp = { };
|
||||
|
||||
return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
|
||||
atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
|
||||
blkg_rwstat_read(rwstat, &tmp);
|
||||
return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -763,6 +705,15 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
|
||||
struct bio *bio) { return false; }
|
||||
#endif
|
||||
|
||||
bool __blkcg_punt_bio_submit(struct bio *bio);
|
||||
|
||||
static inline bool blkcg_punt_bio_submit(struct bio *bio)
|
||||
{
|
||||
if (bio->bi_opf & REQ_CGROUP_PUNT)
|
||||
return __blkcg_punt_bio_submit(bio);
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void blkcg_bio_issue_init(struct bio *bio)
|
||||
{
|
||||
@@ -910,6 +861,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
|
||||
static inline void blkg_get(struct blkcg_gq *blkg) { }
|
||||
static inline void blkg_put(struct blkcg_gq *blkg) { }
|
||||
|
||||
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
|
||||
static inline void blkcg_bio_issue_init(struct bio *bio) { }
|
||||
static inline bool blkcg_bio_issue_check(struct request_queue *q,
|
||||
struct bio *bio) { return true; }
|
||||
|
@@ -306,7 +306,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs
|
||||
bool blk_mq_complete_request(struct request *rq);
|
||||
void blk_mq_complete_request_sync(struct request *rq);
|
||||
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
|
||||
struct bio *bio);
|
||||
struct bio *bio, unsigned int nr_segs);
|
||||
bool blk_mq_queue_stopped(struct request_queue *q);
|
||||
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
|
||||
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
|
||||
|
@@ -154,11 +154,6 @@ struct bio {
|
||||
blk_status_t bi_status;
|
||||
u8 bi_partno;
|
||||
|
||||
/* Number of segments in this BIO after
|
||||
* physical address coalescing is performed.
|
||||
*/
|
||||
unsigned int bi_phys_segments;
|
||||
|
||||
struct bvec_iter bi_iter;
|
||||
|
||||
atomic_t __bi_remaining;
|
||||
@@ -210,7 +205,6 @@ struct bio {
|
||||
*/
|
||||
enum {
|
||||
BIO_NO_PAGE_REF, /* don't put release vec pages */
|
||||
BIO_SEG_VALID, /* bi_phys_segments valid */
|
||||
BIO_CLONED, /* doesn't own data */
|
||||
BIO_BOUNCED, /* bio is a bounce bio */
|
||||
BIO_USER_MAPPED, /* contains user pages */
|
||||
@@ -317,6 +311,14 @@ enum req_flag_bits {
|
||||
__REQ_RAHEAD, /* read ahead, can fail anytime */
|
||||
__REQ_BACKGROUND, /* background IO */
|
||||
__REQ_NOWAIT, /* Don't wait if request will block */
|
||||
/*
|
||||
* When a shared kthread needs to issue a bio for a cgroup, doing
|
||||
* so synchronously can lead to priority inversions as the kthread
|
||||
* can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
|
||||
* submit_bio() punt the actual issuing to a dedicated per-blkcg
|
||||
* work item to avoid such priority inversions.
|
||||
*/
|
||||
__REQ_CGROUP_PUNT,
|
||||
|
||||
/* command specific flags for REQ_OP_WRITE_ZEROES: */
|
||||
__REQ_NOUNMAP, /* do not free blocks when zeroing */
|
||||
@@ -343,6 +345,8 @@ enum req_flag_bits {
|
||||
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
|
||||
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
|
||||
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
|
||||
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
|
||||
|
||||
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
|
||||
#define REQ_HIPRI (1ULL << __REQ_HIPRI)
|
||||
|
||||
|
@@ -137,11 +137,11 @@ struct request {
|
||||
unsigned int cmd_flags; /* op and common flags */
|
||||
req_flags_t rq_flags;
|
||||
|
||||
int tag;
|
||||
int internal_tag;
|
||||
|
||||
/* the following two fields are internal, NEVER access directly */
|
||||
unsigned int __data_len; /* total data len */
|
||||
int tag;
|
||||
sector_t __sector; /* sector cursor */
|
||||
|
||||
struct bio *bio;
|
||||
@@ -344,10 +344,15 @@ struct queue_limits {
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
|
||||
/*
|
||||
* Maximum number of zones to report with a single report zones command.
|
||||
*/
|
||||
#define BLK_ZONED_REPORT_MAX_ZONES 8192U
|
||||
|
||||
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
|
||||
extern int blkdev_report_zones(struct block_device *bdev,
|
||||
sector_t sector, struct blk_zone *zones,
|
||||
unsigned int *nr_zones, gfp_t gfp_mask);
|
||||
unsigned int *nr_zones);
|
||||
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
|
||||
sector_t nr_sectors, gfp_t gfp_mask);
|
||||
extern int blk_revalidate_disk_zones(struct gendisk *disk);
|
||||
@@ -681,7 +686,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
|
||||
}
|
||||
}
|
||||
|
||||
static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
|
||||
static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
|
||||
{
|
||||
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
|
||||
}
|
||||
@@ -828,7 +833,6 @@ extern void blk_unregister_queue(struct gendisk *disk);
|
||||
extern blk_qc_t generic_make_request(struct bio *bio);
|
||||
extern blk_qc_t direct_make_request(struct bio *bio);
|
||||
extern void blk_rq_init(struct request_queue *q, struct request *rq);
|
||||
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
|
||||
extern void blk_put_request(struct request *);
|
||||
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
|
||||
blk_mq_req_flags_t flags);
|
||||
@@ -842,7 +846,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
|
||||
struct request *rq);
|
||||
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
|
||||
extern void blk_queue_split(struct request_queue *, struct bio **);
|
||||
extern void blk_recount_segments(struct request_queue *, struct bio *);
|
||||
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
|
||||
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
|
||||
unsigned int, void __user *);
|
||||
@@ -867,6 +870,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *,
|
||||
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
|
||||
struct request *, int, rq_end_io_fn *);
|
||||
|
||||
/* Helper to convert REQ_OP_XXX to its string format XXX */
|
||||
extern const char *blk_op_str(unsigned int op);
|
||||
|
||||
int blk_status_to_errno(blk_status_t status);
|
||||
blk_status_t errno_to_blk_status(int errno);
|
||||
|
||||
@@ -1026,21 +1032,9 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
|
||||
*
|
||||
* blk_update_request() completes given number of bytes and updates
|
||||
* the request without completing it.
|
||||
*
|
||||
* blk_end_request() and friends. __blk_end_request() must be called
|
||||
* with the request queue spinlock acquired.
|
||||
*
|
||||
* Several drivers define their own end_request and call
|
||||
* blk_end_request() for parts of the original function.
|
||||
* This prevents code duplication in drivers.
|
||||
*/
|
||||
extern bool blk_update_request(struct request *rq, blk_status_t error,
|
||||
unsigned int nr_bytes);
|
||||
extern void blk_end_request_all(struct request *rq, blk_status_t error);
|
||||
extern bool __blk_end_request(struct request *rq, blk_status_t error,
|
||||
unsigned int nr_bytes);
|
||||
extern void __blk_end_request_all(struct request *rq, blk_status_t error);
|
||||
extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
|
||||
|
||||
extern void __blk_complete_request(struct request *);
|
||||
extern void blk_abort_request(struct request *);
|
||||
@@ -1429,7 +1423,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
|
||||
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(bdev);
|
||||
|
||||
@@ -1684,8 +1678,7 @@ struct block_device_operations {
|
||||
/* this callback is with swap_lock and sometimes page table lock held */
|
||||
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
|
||||
int (*report_zones)(struct gendisk *, sector_t sector,
|
||||
struct blk_zone *zones, unsigned int *nr_zones,
|
||||
gfp_t gfp_mask);
|
||||
struct blk_zone *zones, unsigned int *nr_zones);
|
||||
struct module *owner;
|
||||
const struct pr_ops *pr_ops;
|
||||
};
|
||||
|
@@ -6,6 +6,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/percpu-refcount.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <uapi/linux/bpf.h>
|
||||
|
||||
@@ -71,11 +72,17 @@ struct cgroup_bpf {
|
||||
u32 flags[MAX_BPF_ATTACH_TYPE];
|
||||
|
||||
/* temp storage for effective prog array used by prog_attach/detach */
|
||||
struct bpf_prog_array __rcu *inactive;
|
||||
struct bpf_prog_array *inactive;
|
||||
|
||||
/* reference counter used to detach bpf programs after cgroup removal */
|
||||
struct percpu_ref refcnt;
|
||||
|
||||
/* cgroup_bpf is released using a work queue */
|
||||
struct work_struct release_work;
|
||||
};
|
||||
|
||||
void cgroup_bpf_put(struct cgroup *cgrp);
|
||||
int cgroup_bpf_inherit(struct cgroup *cgrp);
|
||||
void cgroup_bpf_offline(struct cgroup *cgrp);
|
||||
|
||||
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
|
||||
enum bpf_attach_type type, u32 flags);
|
||||
@@ -117,6 +124,14 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
|
||||
loff_t *ppos, void **new_buf,
|
||||
enum bpf_attach_type type);
|
||||
|
||||
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
|
||||
int *optname, char __user *optval,
|
||||
int *optlen, char **kernel_optval);
|
||||
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
|
||||
int optname, char __user *optval,
|
||||
int __user *optlen, int max_optlen,
|
||||
int retval);
|
||||
|
||||
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
|
||||
struct bpf_map *map)
|
||||
{
|
||||
@@ -238,6 +253,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
|
||||
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
|
||||
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
|
||||
|
||||
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
|
||||
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
|
||||
|
||||
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
|
||||
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
|
||||
|
||||
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
@@ -273,6 +294,38 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
|
||||
kernel_optval) \
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
if (cgroup_bpf_enabled) \
|
||||
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
|
||||
optname, optval, \
|
||||
optlen, \
|
||||
kernel_optval); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
if (cgroup_bpf_enabled) \
|
||||
get_user(__ret, optlen); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
|
||||
max_optlen, retval) \
|
||||
({ \
|
||||
int __ret = retval; \
|
||||
if (cgroup_bpf_enabled) \
|
||||
__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
|
||||
optname, optval, \
|
||||
optlen, max_optlen, \
|
||||
retval); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
|
||||
enum bpf_prog_type ptype, struct bpf_prog *prog);
|
||||
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
|
||||
@@ -283,8 +336,8 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
|
||||
|
||||
struct bpf_prog;
|
||||
struct cgroup_bpf {};
|
||||
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
|
||||
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
|
||||
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
|
||||
|
||||
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
|
||||
enum bpf_prog_type ptype,
|
||||
@@ -339,9 +392,16 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
|
||||
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
|
||||
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
|
||||
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
|
||||
optlen, max_optlen, retval) ({ retval; })
|
||||
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
|
||||
kernel_optval) ({ 0; })
|
||||
|
||||
#define for_each_cgroup_storage_type(stype) for (; false; )
|
||||
|
||||
|
@@ -63,6 +63,11 @@ struct bpf_map_ops {
|
||||
u64 imm, u32 *off);
|
||||
};
|
||||
|
||||
struct bpf_map_memory {
|
||||
u32 pages;
|
||||
struct user_struct *user;
|
||||
};
|
||||
|
||||
struct bpf_map {
|
||||
/* The first two cachelines with read-mostly members of which some
|
||||
* are also accessed in fast-path (e.g. ops, max_entries).
|
||||
@@ -83,7 +88,7 @@ struct bpf_map {
|
||||
u32 btf_key_type_id;
|
||||
u32 btf_value_type_id;
|
||||
struct btf *btf;
|
||||
u32 pages;
|
||||
struct bpf_map_memory memory;
|
||||
bool unpriv_array;
|
||||
bool frozen; /* write-once */
|
||||
/* 48 bytes hole */
|
||||
@@ -91,8 +96,7 @@ struct bpf_map {
|
||||
/* The 3rd and 4th cacheline with misc members to avoid false sharing
|
||||
* particularly with refcounting.
|
||||
*/
|
||||
struct user_struct *user ____cacheline_aligned;
|
||||
atomic_t refcnt;
|
||||
atomic_t refcnt ____cacheline_aligned;
|
||||
atomic_t usercnt;
|
||||
struct work_struct work;
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
@@ -273,6 +277,7 @@ enum bpf_reg_type {
|
||||
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
|
||||
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
|
||||
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
|
||||
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
|
||||
};
|
||||
|
||||
/* The information passed from prog-specific *_is_valid_access
|
||||
@@ -367,6 +372,7 @@ struct bpf_prog_aux {
|
||||
u32 id;
|
||||
u32 func_cnt; /* used by non-func prog as the number of func progs */
|
||||
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
|
||||
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
|
||||
bool offload_requested;
|
||||
struct bpf_prog **func;
|
||||
void *jit_data; /* JIT specific data. arch dependent */
|
||||
@@ -510,17 +516,18 @@ struct bpf_prog_array {
|
||||
};
|
||||
|
||||
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
|
||||
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
|
||||
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
|
||||
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
|
||||
void bpf_prog_array_free(struct bpf_prog_array *progs);
|
||||
int bpf_prog_array_length(struct bpf_prog_array *progs);
|
||||
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
|
||||
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
|
||||
__u32 __user *prog_ids, u32 cnt);
|
||||
|
||||
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
|
||||
void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
|
||||
struct bpf_prog *old_prog);
|
||||
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
|
||||
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
|
||||
u32 *prog_ids, u32 request_cnt,
|
||||
u32 *prog_cnt);
|
||||
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
|
||||
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
|
||||
struct bpf_prog *exclude_prog,
|
||||
struct bpf_prog *include_prog,
|
||||
struct bpf_prog_array **new_array);
|
||||
@@ -548,6 +555,56 @@ _out: \
|
||||
_ret; \
|
||||
})
|
||||
|
||||
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
|
||||
* so BPF programs can request cwr for TCP packets.
|
||||
*
|
||||
* Current cgroup skb programs can only return 0 or 1 (0 to drop the
|
||||
* packet. This macro changes the behavior so the low order bit
|
||||
* indicates whether the packet should be dropped (0) or not (1)
|
||||
* and the next bit is a congestion notification bit. This could be
|
||||
* used by TCP to call tcp_enter_cwr()
|
||||
*
|
||||
* Hence, new allowed return values of CGROUP EGRESS BPF programs are:
|
||||
* 0: drop packet
|
||||
* 1: keep packet
|
||||
* 2: drop packet and cn
|
||||
* 3: keep packet and cn
|
||||
*
|
||||
* This macro then converts it to one of the NET_XMIT or an error
|
||||
* code that is then interpreted as drop packet (and no cn):
|
||||
* 0: NET_XMIT_SUCCESS skb should be transmitted
|
||||
* 1: NET_XMIT_DROP skb should be dropped and cn
|
||||
* 2: NET_XMIT_CN skb should be transmitted and cn
|
||||
* 3: -EPERM skb should be dropped
|
||||
*/
|
||||
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
|
||||
({ \
|
||||
struct bpf_prog_array_item *_item; \
|
||||
struct bpf_prog *_prog; \
|
||||
struct bpf_prog_array *_array; \
|
||||
u32 ret; \
|
||||
u32 _ret = 1; \
|
||||
u32 _cn = 0; \
|
||||
preempt_disable(); \
|
||||
rcu_read_lock(); \
|
||||
_array = rcu_dereference(array); \
|
||||
_item = &_array->items[0]; \
|
||||
while ((_prog = READ_ONCE(_item->prog))) { \
|
||||
bpf_cgroup_storage_set(_item->cgroup_storage); \
|
||||
ret = func(_prog, ctx); \
|
||||
_ret &= (ret & 1); \
|
||||
_cn |= (ret & 2); \
|
||||
_item++; \
|
||||
} \
|
||||
rcu_read_unlock(); \
|
||||
preempt_enable(); \
|
||||
if (_ret) \
|
||||
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
|
||||
else \
|
||||
_ret = (_cn ? NET_XMIT_DROP : -EPERM); \
|
||||
_ret; \
|
||||
})
|
||||
|
||||
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
|
||||
__BPF_PROG_RUN_ARRAY(array, ctx, func, false)
|
||||
|
||||
@@ -592,15 +649,17 @@ struct bpf_map *__bpf_map_get(struct fd f);
|
||||
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
|
||||
void bpf_map_put_with_uref(struct bpf_map *map);
|
||||
void bpf_map_put(struct bpf_map *map);
|
||||
int bpf_map_precharge_memlock(u32 pages);
|
||||
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
|
||||
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
|
||||
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
|
||||
void bpf_map_charge_finish(struct bpf_map_memory *mem);
|
||||
void bpf_map_charge_move(struct bpf_map_memory *dst,
|
||||
struct bpf_map_memory *src);
|
||||
void *bpf_map_area_alloc(size_t size, int numa_node);
|
||||
void bpf_map_area_free(void *base);
|
||||
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
|
||||
|
||||
extern int sysctl_unprivileged_bpf_disabled;
|
||||
extern int sysctl_bpf_stats_enabled;
|
||||
|
||||
int bpf_map_new_fd(struct bpf_map *map, int flags);
|
||||
int bpf_prog_new_fd(struct bpf_prog *prog);
|
||||
@@ -993,6 +1052,7 @@ extern const struct bpf_func_proto bpf_spin_unlock_proto;
|
||||
extern const struct bpf_func_proto bpf_get_local_storage_proto;
|
||||
extern const struct bpf_func_proto bpf_strtol_proto;
|
||||
extern const struct bpf_func_proto bpf_strtoul_proto;
|
||||
extern const struct bpf_func_proto bpf_tcp_sock_proto;
|
||||
|
||||
/* Shared helpers among cBPF and eBPF. */
|
||||
void bpf_user_rnd_init_once(void);
|
||||
@@ -1041,6 +1101,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
|
||||
struct bpf_insn *insn_buf,
|
||||
struct bpf_prog *prog,
|
||||
u32 *target_size);
|
||||
|
||||
bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
|
||||
struct bpf_insn_access_aux *info);
|
||||
|
||||
u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
|
||||
const struct bpf_insn *si,
|
||||
struct bpf_insn *insn_buf,
|
||||
struct bpf_prog *prog,
|
||||
u32 *target_size);
|
||||
#else
|
||||
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
|
||||
enum bpf_access_type type,
|
||||
@@ -1057,6 +1126,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
|
||||
enum bpf_access_type type,
|
||||
struct bpf_insn_access_aux *info)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
|
||||
const struct bpf_insn *si,
|
||||
struct bpf_insn *insn_buf,
|
||||
struct bpf_prog *prog,
|
||||
u32 *target_size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_INET */
|
||||
|
||||
#endif /* _LINUX_BPF_H */
|
||||
|
@@ -30,6 +30,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
|
||||
#ifdef CONFIG_CGROUP_BPF
|
||||
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
|
||||
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl)
|
||||
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt)
|
||||
#endif
|
||||
#ifdef CONFIG_BPF_LIRC_MODE2
|
||||
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
|
||||
|
@@ -33,9 +33,11 @@
|
||||
*/
|
||||
enum bpf_reg_liveness {
|
||||
REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
|
||||
REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
|
||||
REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
|
||||
REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
|
||||
REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
|
||||
REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
|
||||
REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
|
||||
REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
|
||||
REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
|
||||
};
|
||||
|
||||
struct bpf_reg_state {
|
||||
@@ -128,7 +130,14 @@ struct bpf_reg_state {
|
||||
* pointing to bpf_func_state.
|
||||
*/
|
||||
u32 frameno;
|
||||
/* Tracks subreg definition. The stored value is the insn_idx of the
|
||||
* writing insn. This is safe because subreg_def is used before any insn
|
||||
* patching which only happens after main verification finished.
|
||||
*/
|
||||
s32 subreg_def;
|
||||
enum bpf_reg_liveness live;
|
||||
/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
|
||||
bool precise;
|
||||
};
|
||||
|
||||
enum bpf_stack_slot_type {
|
||||
@@ -180,13 +189,77 @@ struct bpf_func_state {
|
||||
struct bpf_stack_state *stack;
|
||||
};
|
||||
|
||||
struct bpf_idx_pair {
|
||||
u32 prev_idx;
|
||||
u32 idx;
|
||||
};
|
||||
|
||||
#define MAX_CALL_FRAMES 8
|
||||
struct bpf_verifier_state {
|
||||
/* call stack tracking */
|
||||
struct bpf_func_state *frame[MAX_CALL_FRAMES];
|
||||
struct bpf_verifier_state *parent;
|
||||
/*
|
||||
* 'branches' field is the number of branches left to explore:
|
||||
* 0 - all possible paths from this state reached bpf_exit or
|
||||
* were safely pruned
|
||||
* 1 - at least one path is being explored.
|
||||
* This state hasn't reached bpf_exit
|
||||
* 2 - at least two paths are being explored.
|
||||
* This state is an immediate parent of two children.
|
||||
* One is fallthrough branch with branches==1 and another
|
||||
* state is pushed into stack (to be explored later) also with
|
||||
* branches==1. The parent of this state has branches==1.
|
||||
* The verifier state tree connected via 'parent' pointer looks like:
|
||||
* 1
|
||||
* 1
|
||||
* 2 -> 1 (first 'if' pushed into stack)
|
||||
* 1
|
||||
* 2 -> 1 (second 'if' pushed into stack)
|
||||
* 1
|
||||
* 1
|
||||
* 1 bpf_exit.
|
||||
*
|
||||
* Once do_check() reaches bpf_exit, it calls update_branch_counts()
|
||||
* and the verifier state tree will look:
|
||||
* 1
|
||||
* 1
|
||||
* 2 -> 1 (first 'if' pushed into stack)
|
||||
* 1
|
||||
* 1 -> 1 (second 'if' pushed into stack)
|
||||
* 0
|
||||
* 0
|
||||
* 0 bpf_exit.
|
||||
* After pop_stack() the do_check() will resume at second 'if'.
|
||||
*
|
||||
* If is_state_visited() sees a state with branches > 0 it means
|
||||
* there is a loop. If such state is exactly equal to the current state
|
||||
* it's an infinite loop. Note states_equal() checks for states
|
||||
* equvalency, so two states being 'states_equal' does not mean
|
||||
* infinite loop. The exact comparison is provided by
|
||||
* states_maybe_looping() function. It's a stronger pre-check and
|
||||
* much faster than states_equal().
|
||||
*
|
||||
* This algorithm may not find all possible infinite loops or
|
||||
* loop iteration count may be too high.
|
||||
* In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
|
||||
*/
|
||||
u32 branches;
|
||||
u32 insn_idx;
|
||||
u32 curframe;
|
||||
u32 active_spin_lock;
|
||||
bool speculative;
|
||||
|
||||
/* first and last insn idx of this verifier state */
|
||||
u32 first_insn_idx;
|
||||
u32 last_insn_idx;
|
||||
/* jmp history recorded from first to last.
|
||||
* backtracking is using it to go from last to first.
|
||||
* For most states jmp_history_cnt is [0-3].
|
||||
* For loops can go up to ~40.
|
||||
*/
|
||||
struct bpf_idx_pair *jmp_history;
|
||||
u32 jmp_history_cnt;
|
||||
};
|
||||
|
||||
#define bpf_get_spilled_reg(slot, frame) \
|
||||
@@ -229,7 +302,9 @@ struct bpf_insn_aux_data {
|
||||
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
|
||||
int sanitize_stack_off; /* stack slot to be cleared */
|
||||
bool seen; /* this insn was processed by the verifier */
|
||||
bool zext_dst; /* this insn zero extends dst reg */
|
||||
u8 alu_state; /* used in combination with alu_limit */
|
||||
bool prune_point;
|
||||
unsigned int orig_idx; /* original instruction index */
|
||||
};
|
||||
|
||||
@@ -299,7 +374,9 @@ struct bpf_verifier_env {
|
||||
} cfg;
|
||||
u32 subprog_cnt;
|
||||
/* number of instructions analyzed by the verifier */
|
||||
u32 insn_processed;
|
||||
u32 prev_insn_processed, insn_processed;
|
||||
/* number of jmps, calls, exits analyzed so far */
|
||||
u32 prev_jmps_processed, jmps_processed;
|
||||
/* total verification time */
|
||||
u64 verification_time;
|
||||
/* maximum number of verifier states kept in 'branching' instructions */
|
||||
|
@@ -1,12 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Silicon Labs C2 port Linux support
|
||||
*
|
||||
* Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
|
||||
* Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation
|
||||
*/
|
||||
|
||||
#define C2PORT_NAME_LEN 32
|
||||
|
@@ -17,6 +17,8 @@ enum cache_type {
|
||||
CACHE_TYPE_UNIFIED = BIT(2),
|
||||
};
|
||||
|
||||
extern unsigned int coherency_max_size;
|
||||
|
||||
/**
|
||||
* struct cacheinfo - represent a cache leaf node
|
||||
* @id: This cache's id. It is unique among caches with the same (type, level).
|
||||
|
@@ -1,9 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _CAN_LED_H
|
||||
|
@@ -1,17 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* linux/can/rx-offload.h
|
||||
*
|
||||
* Copyright (c) 2014 David Jander, Protonic Holland
|
||||
* Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the version 2 of the GNU General Public License
|
||||
* as published by the Free Software Foundation
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _CAN_RX_OFFLOAD_H
|
||||
|
@@ -1,11 +1,8 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* cb710/cb710.h
|
||||
*
|
||||
* Copyright by Michał Mirosław, 2008-2009
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef LINUX_CB710_DRIVER_H
|
||||
#define LINUX_CB710_DRIVER_H
|
||||
@@ -129,10 +126,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
|
||||
* cb710/sgbuf2.h
|
||||
*
|
||||
* Copyright by Michał Mirosław, 2008-2009
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef LINUX_CB710_SG_H
|
||||
#define LINUX_CB710_SG_H
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* AMD Cryptographic Coprocessor (CCP) driver
|
||||
*
|
||||
@@ -5,10 +6,6 @@
|
||||
*
|
||||
* Author: Tom Lendacky <thomas.lendacky@amd.com>
|
||||
* Author: Gary R Hook <gary.hook@amd.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __CCP_H__
|
||||
|
@@ -211,6 +211,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
|
||||
CEPH_FEATURE_MON_STATEFUL_SUB | \
|
||||
CEPH_FEATURE_CRUSH_TUNABLES5 | \
|
||||
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
|
||||
CEPH_FEATURE_MSG_ADDR2 | \
|
||||
CEPH_FEATURE_CEPHX_V2)
|
||||
|
||||
#define CEPH_FEATURES_REQUIRED_DEFAULT 0
|
||||
|
@@ -682,7 +682,7 @@ extern const char *ceph_cap_op_name(int op);
|
||||
/* flags field in client cap messages (version >= 10) */
|
||||
#define CEPH_CLIENT_CAPS_SYNC (1<<0)
|
||||
#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1)
|
||||
#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2);
|
||||
#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2)
|
||||
|
||||
/*
|
||||
* caps message, used for capability callbacks, acks, requests, etc.
|
||||
|
@@ -52,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc,
|
||||
char *lock_name, u8 *type, char **tag,
|
||||
struct ceph_locker **lockers, u32 *num_lockers);
|
||||
|
||||
int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
|
||||
char *lock_name, u8 type, char *cookie, char *tag);
|
||||
|
||||
#endif
|
||||
|
@@ -19,9 +19,9 @@ static const struct file_operations name##_fops = { \
|
||||
};
|
||||
|
||||
/* debugfs.c */
|
||||
extern int ceph_debugfs_init(void);
|
||||
extern void ceph_debugfs_init(void);
|
||||
extern void ceph_debugfs_cleanup(void);
|
||||
extern int ceph_debugfs_client_init(struct ceph_client *client);
|
||||
extern void ceph_debugfs_client_init(struct ceph_client *client);
|
||||
extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
|
||||
|
||||
#endif
|
||||
|
@@ -218,18 +218,27 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
|
||||
/*
|
||||
* sockaddr_storage <-> ceph_sockaddr
|
||||
*/
|
||||
static inline void ceph_encode_addr(struct ceph_entity_addr *a)
|
||||
#define CEPH_ENTITY_ADDR_TYPE_NONE 0
|
||||
#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
|
||||
|
||||
static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
|
||||
{
|
||||
__be16 ss_family = htons(a->in_addr.ss_family);
|
||||
a->in_addr.ss_family = *(__u16 *)&ss_family;
|
||||
|
||||
/* Banner addresses require TYPE_NONE */
|
||||
a->type = CEPH_ENTITY_ADDR_TYPE_NONE;
|
||||
}
|
||||
static inline void ceph_decode_addr(struct ceph_entity_addr *a)
|
||||
static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
|
||||
{
|
||||
__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
|
||||
a->in_addr.ss_family = ntohs(ss_family);
|
||||
WARN_ON(a->in_addr.ss_family == 512);
|
||||
a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
|
||||
}
|
||||
|
||||
extern int ceph_decode_entity_addr(void **p, void *end,
|
||||
struct ceph_entity_addr *addr);
|
||||
/*
|
||||
* encoders
|
||||
*/
|
||||
|
@@ -84,11 +84,13 @@ struct ceph_options {
|
||||
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
|
||||
|
||||
/*
|
||||
* Handle the largest possible rbd object in one message.
|
||||
* The largest possible rbd data object is 32M.
|
||||
* The largest possible rbd object map object is 64M.
|
||||
*
|
||||
* There is no limit on the size of cephfs objects, but it has to obey
|
||||
* rsize and wsize mount options anyway.
|
||||
*/
|
||||
#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
|
||||
#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024)
|
||||
|
||||
#define CEPH_AUTH_NAME_DEFAULT "guest"
|
||||
|
||||
@@ -299,10 +301,6 @@ int ceph_wait_for_latest_osdmap(struct ceph_client *client,
|
||||
|
||||
/* pagevec.c */
|
||||
extern void ceph_release_page_vector(struct page **pages, int num_pages);
|
||||
|
||||
extern struct page **ceph_get_direct_page_vector(const void __user *data,
|
||||
int num_pages,
|
||||
bool write_page);
|
||||
extern void ceph_put_page_vector(struct page **pages, int num_pages,
|
||||
bool dirty);
|
||||
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
|
||||
|
@@ -104,7 +104,6 @@ struct ceph_mon_client {
|
||||
#endif
|
||||
};
|
||||
|
||||
extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
|
||||
extern int ceph_monmap_contains(struct ceph_monmap *m,
|
||||
struct ceph_entity_addr *addr);
|
||||
|
||||
|
@@ -198,9 +198,9 @@ struct ceph_osd_request {
|
||||
bool r_mempool;
|
||||
struct completion r_completion; /* private to osd_client.c */
|
||||
ceph_osdc_callback_t r_callback;
|
||||
struct list_head r_unsafe_item;
|
||||
|
||||
struct inode *r_inode; /* for use by callbacks */
|
||||
struct list_head r_private_item; /* ditto */
|
||||
void *r_priv; /* ditto */
|
||||
|
||||
/* set by submitter */
|
||||
@@ -389,6 +389,14 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
|
||||
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
|
||||
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
|
||||
|
||||
#define osd_req_op_data(oreq, whch, typ, fld) \
|
||||
({ \
|
||||
struct ceph_osd_request *__oreq = (oreq); \
|
||||
unsigned int __whch = (whch); \
|
||||
BUG_ON(__whch >= __oreq->r_num_ops); \
|
||||
&__oreq->r_ops[__whch].typ.fld; \
|
||||
})
|
||||
|
||||
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
|
||||
unsigned int which, u16 opcode, u32 flags);
|
||||
|
||||
@@ -497,7 +505,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
|
||||
const char *class, const char *method,
|
||||
unsigned int flags,
|
||||
struct page *req_page, size_t req_len,
|
||||
struct page *resp_page, size_t *resp_len);
|
||||
struct page **resp_pages, size_t *resp_len);
|
||||
|
||||
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
|
||||
struct ceph_vino vino,
|
||||
|
@@ -66,4 +66,6 @@ int ceph_extent_to_file(struct ceph_file_layout *l,
|
||||
struct ceph_file_extent **file_extents,
|
||||
u32 *num_file_extents);
|
||||
|
||||
u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size);
|
||||
|
||||
#endif
|
||||
|
@@ -624,7 +624,7 @@ struct cftype {
|
||||
|
||||
/*
|
||||
* Control Group subsystem type.
|
||||
* See Documentation/cgroup-v1/cgroups.txt for details
|
||||
* See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
|
||||
*/
|
||||
struct cgroup_subsys {
|
||||
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
|
||||
|
@@ -131,6 +131,8 @@ void cgroup_free(struct task_struct *p);
|
||||
int cgroup_init_early(void);
|
||||
int cgroup_init(void);
|
||||
|
||||
int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
|
||||
|
||||
/*
|
||||
* Iteration helpers and macros.
|
||||
*/
|
||||
@@ -697,6 +699,7 @@ void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
|
||||
struct cgroup_subsys_state;
|
||||
struct cgroup;
|
||||
|
||||
static inline void css_get(struct cgroup_subsys_state *css) {}
|
||||
static inline void css_put(struct cgroup_subsys_state *css) {}
|
||||
static inline int cgroup_attach_task_all(struct task_struct *from,
|
||||
struct task_struct *t) { return 0; }
|
||||
@@ -934,4 +937,22 @@ static inline bool cgroup_task_frozen(struct task_struct *task)
|
||||
|
||||
#endif /* !CONFIG_CGROUPS */
|
||||
|
||||
#ifdef CONFIG_CGROUP_BPF
|
||||
static inline void cgroup_bpf_get(struct cgroup *cgrp)
|
||||
{
|
||||
percpu_ref_get(&cgrp->bpf.refcnt);
|
||||
}
|
||||
|
||||
static inline void cgroup_bpf_put(struct cgroup *cgrp)
|
||||
{
|
||||
percpu_ref_put(&cgrp->bpf.refcnt);
|
||||
}
|
||||
|
||||
#else /* CONFIG_CGROUP_BPF */
|
||||
|
||||
static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
|
||||
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
|
||||
|
||||
#endif /* CONFIG_CGROUP_BPF */
|
||||
|
||||
#endif /* _LINUX_CGROUP_H */
|
||||
|
@@ -1,9 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
|
||||
*
|
||||
* This file is subject to the terms and conditions of version 2 of the GNU
|
||||
* General Public License. See the file COPYING in the main directory of the
|
||||
* Linux distribution for more details.
|
||||
*/
|
||||
|
||||
#ifndef _CGROUP_RDMA_H
|
||||
|
@@ -9,8 +9,6 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_clk.h>
|
||||
|
||||
#ifdef CONFIG_COMMON_CLK
|
||||
|
||||
/*
|
||||
* flags used across common struct clk. these flags should only affect the
|
||||
* top-level framework. custom flags for dealing with hardware specifics
|
||||
@@ -807,7 +805,14 @@ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
|
||||
/* helper functions */
|
||||
const char *__clk_get_name(const struct clk *clk);
|
||||
const char *clk_hw_get_name(const struct clk_hw *hw);
|
||||
#ifdef CONFIG_COMMON_CLK
|
||||
struct clk_hw *__clk_get_hw(struct clk *clk);
|
||||
#else
|
||||
static inline struct clk_hw *__clk_get_hw(struct clk *clk)
|
||||
{
|
||||
return (struct clk_hw *)clk;
|
||||
}
|
||||
#endif
|
||||
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
|
||||
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
|
||||
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
|
||||
@@ -867,8 +872,6 @@ static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate,
|
||||
*/
|
||||
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
|
||||
|
||||
struct of_device_id;
|
||||
|
||||
struct clk_onecell_data {
|
||||
struct clk **clks;
|
||||
unsigned int clk_num;
|
||||
@@ -879,8 +882,6 @@ struct clk_hw_onecell_data {
|
||||
struct clk_hw *hws[];
|
||||
};
|
||||
|
||||
extern struct of_device_id __clk_of_table;
|
||||
|
||||
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
|
||||
|
||||
/*
|
||||
@@ -904,6 +905,40 @@ extern struct of_device_id __clk_of_table;
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
.name = _name, \
|
||||
.parent_hws = (const struct clk_hw*[]) { _parent }, \
|
||||
.num_parents = 1, \
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
/*
|
||||
* This macro is intended for drivers to be able to share the otherwise
|
||||
* individual struct clk_hw[] compound literals created by the compiler
|
||||
* when using CLK_HW_INIT_HW. It does NOT support multiple parents.
|
||||
*/
|
||||
#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
.name = _name, \
|
||||
.parent_hws = _parent, \
|
||||
.num_parents = 1, \
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
.name = _name, \
|
||||
.parent_data = (const struct clk_parent_data[]) { \
|
||||
{ .fw_name = _parent }, \
|
||||
}, \
|
||||
.num_parents = 1, \
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
@@ -913,6 +948,24 @@ extern struct of_device_id __clk_of_table;
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
.name = _name, \
|
||||
.parent_hws = _parents, \
|
||||
.num_parents = ARRAY_SIZE(_parents), \
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
.name = _name, \
|
||||
.parent_data = _parents, \
|
||||
.num_parents = ARRAY_SIZE(_parents), \
|
||||
.ops = _ops, \
|
||||
})
|
||||
|
||||
#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
|
||||
(&(struct clk_init_data) { \
|
||||
.flags = _flags, \
|
||||
@@ -933,6 +986,43 @@ extern struct of_device_id __clk_of_table;
|
||||
_flags), \
|
||||
}
|
||||
|
||||
#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \
|
||||
_div, _mult, _flags) \
|
||||
struct clk_fixed_factor _struct = { \
|
||||
.div = _div, \
|
||||
.mult = _mult, \
|
||||
.hw.init = CLK_HW_INIT_HW(_name, \
|
||||
_parent, \
|
||||
&clk_fixed_factor_ops, \
|
||||
_flags), \
|
||||
}
|
||||
|
||||
/*
|
||||
* This macro allows the driver to reuse the _parent array for multiple
|
||||
* fixed factor clk declarations.
|
||||
*/
|
||||
#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \
|
||||
_div, _mult, _flags) \
|
||||
struct clk_fixed_factor _struct = { \
|
||||
.div = _div, \
|
||||
.mult = _mult, \
|
||||
.hw.init = CLK_HW_INIT_HWS(_name, \
|
||||
_parent, \
|
||||
&clk_fixed_factor_ops, \
|
||||
_flags), \
|
||||
}
|
||||
|
||||
#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \
|
||||
_div, _mult, _flags) \
|
||||
struct clk_fixed_factor _struct = { \
|
||||
.div = _div, \
|
||||
.mult = _mult, \
|
||||
.hw.init = CLK_HW_INIT_FW_NAME(_name, \
|
||||
_parent, \
|
||||
&clk_fixed_factor_ops, \
|
||||
_flags), \
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
int of_clk_add_provider(struct device_node *np,
|
||||
struct clk *(*clk_src_get)(struct of_phandle_args *args,
|
||||
@@ -1019,5 +1109,4 @@ static inline int of_clk_detect_critical(struct device_node *np, int index,
|
||||
|
||||
void clk_gate_restore_context(struct clk_hw *hw);
|
||||
|
||||
#endif /* CONFIG_COMMON_CLK */
|
||||
#endif /* CLK_PROVIDER_H */
|
||||
|
@@ -1,13 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* linux/include/linux/clk.h
|
||||
*
|
||||
* Copyright (C) 2004 ARM Limited.
|
||||
* Written by Deep Blue Solutions Limited.
|
||||
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef __LINUX_CLK_H
|
||||
#define __LINUX_CLK_H
|
||||
@@ -332,6 +329,19 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
|
||||
*/
|
||||
int __must_check clk_bulk_get_all(struct device *dev,
|
||||
struct clk_bulk_data **clks);
|
||||
|
||||
/**
|
||||
* clk_bulk_get_optional - lookup and obtain a number of references to clock producer
|
||||
* @dev: device for clock "consumer"
|
||||
* @num_clks: the number of clk_bulk_data
|
||||
* @clks: the clk_bulk_data table of consumer
|
||||
*
|
||||
* Behaves the same as clk_bulk_get() except where there is no clock producer.
|
||||
* In this case, instead of returning -ENOENT, the function returns 0 and
|
||||
* NULL for a clk for which a clock producer could not be determined.
|
||||
*/
|
||||
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
|
||||
struct clk_bulk_data *clks);
|
||||
/**
|
||||
* devm_clk_bulk_get - managed get multiple clk consumers
|
||||
* @dev: device for clock "consumer"
|
||||
@@ -346,6 +356,28 @@ int __must_check clk_bulk_get_all(struct device *dev,
|
||||
*/
|
||||
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
|
||||
struct clk_bulk_data *clks);
|
||||
/**
|
||||
* devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
|
||||
* @dev: device for clock "consumer"
|
||||
* @clks: pointer to the clk_bulk_data table of consumer
|
||||
*
|
||||
* Behaves the same as devm_clk_bulk_get() except where there is no clock
|
||||
* producer. In this case, instead of returning -ENOENT, the function returns
|
||||
* NULL for given clk. It is assumed all clocks in clk_bulk_data are optional.
|
||||
*
|
||||
* Returns 0 if all clocks specified in clk_bulk_data table are obtained
|
||||
* successfully or for any clk there was no clk provider available, otherwise
|
||||
* returns valid IS_ERR() condition containing errno.
|
||||
* The implementation uses @dev and @clk_bulk_data.id to determine the
|
||||
* clock consumer, and thereby the clock producer.
|
||||
* The clock returned is stored in each @clk_bulk_data.clk field.
|
||||
*
|
||||
* Drivers must assume that the clock source is not enabled.
|
||||
*
|
||||
* clk_bulk_get should not be called from within interrupt context.
|
||||
*/
|
||||
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
|
||||
struct clk_bulk_data *clks);
|
||||
/**
|
||||
* devm_clk_bulk_get_all - managed get multiple clk consumers
|
||||
* @dev: device for clock "consumer"
|
||||
@@ -718,6 +750,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int __must_check clk_bulk_get_optional(struct device *dev,
|
||||
int num_clks, struct clk_bulk_data *clks)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int __must_check clk_bulk_get_all(struct device *dev,
|
||||
struct clk_bulk_data **clks)
|
||||
{
|
||||
@@ -741,6 +779,12 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
|
||||
int num_clks, struct clk_bulk_data *clks)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
|
||||
struct clk_bulk_data **clks)
|
||||
{
|
||||
|
@@ -1,9 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2013 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_CLK_MXS_H
|
||||
|
@@ -1,12 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* include/linux/clkdev.h
|
||||
*
|
||||
* Copyright (C) 2008 Russell King.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Helper for the clk API to assist looking up a struct clk.
|
||||
*/
|
||||
#ifndef __CLKDEV_H
|
||||
|
@@ -1,13 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Extend a 32-bit counter to 63 bits
|
||||
*
|
||||
* Author: Nicolas Pitre
|
||||
* Created: December 3, 2006
|
||||
* Copyright: MontaVista Software, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_CNT32_TO_63_H__
|
||||
|
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
|
||||
#ifndef _CODA_HEADER_
|
||||
#define _CODA_HEADER_
|
||||
|
||||
#if defined(__linux__)
|
||||
typedef unsigned long long u_quad_t;
|
||||
#endif
|
||||
|
||||
#include <uapi/linux/coda.h>
|
||||
#endif
|
||||
|
@@ -1,72 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __CODA_PSDEV_H
|
||||
#define __CODA_PSDEV_H
|
||||
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <uapi/linux/coda_psdev.h>
|
||||
|
||||
struct kstatfs;
|
||||
|
||||
/* communication pending/processing queues */
|
||||
struct venus_comm {
|
||||
u_long vc_seq;
|
||||
wait_queue_head_t vc_waitq; /* Venus wait queue */
|
||||
struct list_head vc_pending;
|
||||
struct list_head vc_processing;
|
||||
int vc_inuse;
|
||||
struct super_block *vc_sb;
|
||||
struct mutex vc_mutex;
|
||||
};
|
||||
|
||||
|
||||
static inline struct venus_comm *coda_vcp(struct super_block *sb)
|
||||
{
|
||||
return (struct venus_comm *)((sb)->s_fs_info);
|
||||
}
|
||||
|
||||
/* upcalls */
|
||||
int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
|
||||
int venus_getattr(struct super_block *sb, struct CodaFid *fid,
|
||||
struct coda_vattr *attr);
|
||||
int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
|
||||
int venus_lookup(struct super_block *sb, struct CodaFid *fid,
|
||||
const char *name, int length, int *type,
|
||||
struct CodaFid *resfid);
|
||||
int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
|
||||
kuid_t uid);
|
||||
int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
|
||||
struct file **f);
|
||||
int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
|
||||
const char *name, int length,
|
||||
struct CodaFid *newfid, struct coda_vattr *attrs);
|
||||
int venus_create(struct super_block *sb, struct CodaFid *dirfid,
|
||||
const char *name, int length, int excl, int mode,
|
||||
struct CodaFid *newfid, struct coda_vattr *attrs) ;
|
||||
int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
|
||||
const char *name, int length);
|
||||
int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
|
||||
const char *name, int length);
|
||||
int venus_readlink(struct super_block *sb, struct CodaFid *fid,
|
||||
char *buffer, int *length);
|
||||
int venus_rename(struct super_block *, struct CodaFid *new_fid,
|
||||
struct CodaFid *old_fid, size_t old_length,
|
||||
size_t new_length, const char *old_name,
|
||||
const char *new_name);
|
||||
int venus_link(struct super_block *sb, struct CodaFid *fid,
|
||||
struct CodaFid *dirfid, const char *name, int len );
|
||||
int venus_symlink(struct super_block *sb, struct CodaFid *fid,
|
||||
const char *name, int len, const char *symname, int symlen);
|
||||
int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
|
||||
int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
|
||||
unsigned int cmd, struct PioctlData *data);
|
||||
int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
|
||||
int venus_fsync(struct super_block *sb, struct CodaFid *fid);
|
||||
int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
|
||||
|
||||
/*
|
||||
* Statistics
|
||||
*/
|
||||
|
||||
extern struct venus_comm coda_comms[];
|
||||
#endif
|
@@ -138,8 +138,7 @@ typedef struct {
|
||||
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
|
||||
} compat_sigset_t;
|
||||
|
||||
int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
|
||||
sigset_t *set, sigset_t *oldset,
|
||||
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
|
||||
size_t sigsetsize);
|
||||
|
||||
struct compat_sigaction {
|
||||
|
@@ -170,3 +170,5 @@
|
||||
#else
|
||||
#define __diag_GCC_8(s)
|
||||
#endif
|
||||
|
||||
#define __no_fgcse __attribute__((optimize("-fno-gcse")))
|
||||
|
@@ -116,9 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
|
||||
".pushsection .discard.unreachable\n\t" \
|
||||
".long 999b - .\n\t" \
|
||||
".popsection\n\t"
|
||||
|
||||
/* Annotate a C jump table to allow objtool to follow the code flow */
|
||||
#define __annotate_jump_table __section(".rodata..c_jump_table")
|
||||
|
||||
#else
|
||||
#define annotate_reachable()
|
||||
#define annotate_unreachable()
|
||||
#define __annotate_jump_table
|
||||
#endif
|
||||
|
||||
#ifndef ASM_UNREACHABLE
|
||||
|
@@ -112,6 +112,8 @@ struct ftrace_likely_data {
|
||||
|
||||
#if defined(CC_USING_HOTPATCH)
|
||||
#define notrace __attribute__((hotpatch(0, 0)))
|
||||
#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
|
||||
#define notrace __attribute__((patchable_function_entry(0, 0)))
|
||||
#else
|
||||
#define notrace __attribute__((__no_instrument_function__))
|
||||
#endif
|
||||
@@ -187,6 +189,10 @@ struct ftrace_likely_data {
|
||||
#define asm_volatile_goto(x...) asm goto(x)
|
||||
#endif
|
||||
|
||||
#ifndef __no_fgcse
|
||||
# define __no_fgcse
|
||||
#endif
|
||||
|
||||
/* Are two types/vars the same type (ignoring qualifiers)? */
|
||||
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
|
||||
|
||||
|
@@ -1,112 +0,0 @@
|
||||
/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $
|
||||
*
|
||||
* Copyright 1997 by Henner Eisen <eis@baty.hanse.de>
|
||||
*
|
||||
* This software may be used and distributed according to the terms
|
||||
* of the GNU General Public License, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_CONCAP_H
|
||||
#define _LINUX_CONCAP_H
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
/* Stuff to support encapsulation protocols genericly. The encapsulation
|
||||
protocol is processed at the uppermost layer of the network interface.
|
||||
|
||||
Based on a ideas developed in a 'synchronous device' thread in the
|
||||
linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski
|
||||
and Jonathan Naylor.
|
||||
|
||||
For more documetation on this refer to Documentation/isdn/README.concap
|
||||
*/
|
||||
|
||||
struct concap_proto_ops;
|
||||
struct concap_device_ops;
|
||||
|
||||
/* this manages all data needed by the encapsulation protocol
|
||||
*/
|
||||
struct concap_proto{
|
||||
struct net_device *net_dev; /* net device using our service */
|
||||
struct concap_device_ops *dops; /* callbacks provided by device */
|
||||
struct concap_proto_ops *pops; /* callbacks provided by us */
|
||||
spinlock_t lock;
|
||||
int flags;
|
||||
void *proto_data; /* protocol specific private data, to
|
||||
be accessed via *pops methods only*/
|
||||
/*
|
||||
:
|
||||
whatever
|
||||
:
|
||||
*/
|
||||
};
|
||||
|
||||
/* Operations to be supported by the net device. Called by the encapsulation
|
||||
* protocol entity. No receive method is offered because the encapsulation
|
||||
* protocol directly calls netif_rx().
|
||||
*/
|
||||
struct concap_device_ops{
|
||||
|
||||
/* to request data is submitted by device*/
|
||||
int (*data_req)(struct concap_proto *, struct sk_buff *);
|
||||
|
||||
/* Control methods must be set to NULL by devices which do not
|
||||
support connection control.*/
|
||||
/* to request a connection is set up */
|
||||
int (*connect_req)(struct concap_proto *);
|
||||
|
||||
/* to request a connection is released */
|
||||
int (*disconn_req)(struct concap_proto *);
|
||||
};
|
||||
|
||||
/* Operations to be supported by the encapsulation protocol. Called by
|
||||
* device driver.
|
||||
*/
|
||||
struct concap_proto_ops{
|
||||
|
||||
/* create a new encapsulation protocol instance of same type */
|
||||
struct concap_proto * (*proto_new) (void);
|
||||
|
||||
/* delete encapsulation protocol instance and free all its resources.
|
||||
cprot may no loger be referenced after calling this */
|
||||
void (*proto_del)(struct concap_proto *cprot);
|
||||
|
||||
/* initialize the protocol's data. To be called at interface startup
|
||||
or when the device driver resets the interface. All services of the
|
||||
encapsulation protocol may be used after this*/
|
||||
int (*restart)(struct concap_proto *cprot,
|
||||
struct net_device *ndev,
|
||||
struct concap_device_ops *dops);
|
||||
|
||||
/* inactivate an encapsulation protocol instance. The encapsulation
|
||||
protocol may not call any *dops methods after this. */
|
||||
int (*close)(struct concap_proto *cprot);
|
||||
|
||||
/* process a frame handed down to us by upper layer */
|
||||
int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb);
|
||||
|
||||
/* to be called for each data entity received from lower layer*/
|
||||
int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb);
|
||||
|
||||
/* to be called when a connection was set up/down.
|
||||
Protocols that don't process these primitives might fill in
|
||||
dummy methods here */
|
||||
int (*connect_ind)(struct concap_proto *cprot);
|
||||
int (*disconn_ind)(struct concap_proto *cprot);
|
||||
/*
|
||||
Some network device support functions, like net_header(), rebuild_header(),
|
||||
and others, that depend solely on the encapsulation protocol, might
|
||||
be provided here, too. The net device would just fill them in its
|
||||
corresponding fields when it is opened.
|
||||
*/
|
||||
};
|
||||
|
||||
/* dummy restart/close/connect/reset/disconn methods
|
||||
*/
|
||||
extern int concap_nop(struct concap_proto *cprot);
|
||||
|
||||
/* dummy submit method
|
||||
*/
|
||||
extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb);
|
||||
#endif
|
@@ -55,10 +55,71 @@ struct cn_dev {
|
||||
struct cn_queue_dev *cbdev;
|
||||
};
|
||||
|
||||
/**
|
||||
* cn_add_callback() - Registers new callback with connector core.
|
||||
*
|
||||
* @id: unique connector's user identifier.
|
||||
* It must be registered in connector.h for legal
|
||||
* in-kernel users.
|
||||
* @name: connector's callback symbolic name.
|
||||
* @callback: connector's callback.
|
||||
* parameters are %cn_msg and the sender's credentials
|
||||
*/
|
||||
int cn_add_callback(struct cb_id *id, const char *name,
|
||||
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
|
||||
void cn_del_callback(struct cb_id *);
|
||||
/**
|
||||
* cn_del_callback() - Unregisters new callback with connector core.
|
||||
*
|
||||
* @id: unique connector's user identifier.
|
||||
*/
|
||||
void cn_del_callback(struct cb_id *id);
|
||||
|
||||
|
||||
/**
|
||||
* cn_netlink_send_mult - Sends message to the specified groups.
|
||||
*
|
||||
* @msg: message header(with attached data).
|
||||
* @len: Number of @msg to be sent.
|
||||
* @portid: destination port.
|
||||
* If non-zero the message will be sent to the given port,
|
||||
* which should be set to the original sender.
|
||||
* @group: destination group.
|
||||
* If @portid and @group is zero, then appropriate group will
|
||||
* be searched through all registered connector users, and
|
||||
* message will be delivered to the group which was created
|
||||
* for user with the same ID as in @msg.
|
||||
* If @group is not zero, then message will be delivered
|
||||
* to the specified group.
|
||||
* @gfp_mask: GFP mask.
|
||||
*
|
||||
* It can be safely called from softirq context, but may silently
|
||||
* fail under strong memory pressure.
|
||||
*
|
||||
* If there are no listeners for given group %-ESRCH can be returned.
|
||||
*/
|
||||
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
|
||||
|
||||
/**
|
||||
* cn_netlink_send_mult - Sends message to the specified groups.
|
||||
*
|
||||
* @msg: message header(with attached data).
|
||||
* @portid: destination port.
|
||||
* If non-zero the message will be sent to the given port,
|
||||
* which should be set to the original sender.
|
||||
* @group: destination group.
|
||||
* If @portid and @group is zero, then appropriate group will
|
||||
* be searched through all registered connector users, and
|
||||
* message will be delivered to the group which was created
|
||||
* for user with the same ID as in @msg.
|
||||
* If @group is not zero, then message will be delivered
|
||||
* to the specified group.
|
||||
* @gfp_mask: GFP mask.
|
||||
*
|
||||
* It can be safely called from softirq context, but may silently
|
||||
* fail under strong memory pressure.
|
||||
*
|
||||
* If there are no listeners for given group %-ESRCH can be returned.
|
||||
*/
|
||||
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
|
||||
|
||||
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
|
||||
|
@@ -1,12 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Definitions for container bus type.
|
||||
*
|
||||
* Copyright (C) 2013, Intel Corporation
|
||||
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
|
@@ -91,15 +91,11 @@ union coresight_dev_subtype {
|
||||
|
||||
/**
|
||||
* struct coresight_platform_data - data harvested from the DT specification
|
||||
* @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
|
||||
* @name: name of the component as shown under sysfs.
|
||||
* @nr_inport: number of input ports for this component.
|
||||
* @nr_outport: number of output ports for this component.
|
||||
* @conns: Array of nr_outport connections from this component
|
||||
*/
|
||||
struct coresight_platform_data {
|
||||
int cpu;
|
||||
const char *name;
|
||||
int nr_inport;
|
||||
int nr_outport;
|
||||
struct coresight_connection *conns;
|
||||
@@ -110,11 +106,12 @@ struct coresight_platform_data {
|
||||
* @type: as defined by @coresight_dev_type.
|
||||
* @subtype: as defined by @coresight_dev_subtype.
|
||||
* @ops: generic operations for this component, as defined
|
||||
by @coresight_ops.
|
||||
* by @coresight_ops.
|
||||
* @pdata: platform data collected from DT.
|
||||
* @dev: The device entity associated to this component.
|
||||
* @groups: operations specific to this component. These will end up
|
||||
in the component's sysfs sub-directory.
|
||||
* in the component's sysfs sub-directory.
|
||||
* @name: name for the coresight device, also shown under sysfs.
|
||||
*/
|
||||
struct coresight_desc {
|
||||
enum coresight_dev_type type;
|
||||
@@ -123,28 +120,27 @@ struct coresight_desc {
|
||||
struct coresight_platform_data *pdata;
|
||||
struct device *dev;
|
||||
const struct attribute_group **groups;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct coresight_connection - representation of a single connection
|
||||
* @outport: a connection's output port number.
|
||||
* @chid_name: remote component's name.
|
||||
* @child_port: remote component's port number @output is connected to.
|
||||
* @chid_fwnode: remote component's fwnode handle.
|
||||
* @child_dev: a @coresight_device representation of the component
|
||||
connected to @outport.
|
||||
*/
|
||||
struct coresight_connection {
|
||||
int outport;
|
||||
const char *child_name;
|
||||
int child_port;
|
||||
struct fwnode_handle *child_fwnode;
|
||||
struct coresight_device *child_dev;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct coresight_device - representation of a device as used by the framework
|
||||
* @conns: array of coresight_connections associated to this component.
|
||||
* @nr_inport: number of input port associated to this component.
|
||||
* @nr_outport: number of output port associated to this component.
|
||||
* @pdata: Platform data with device connections associated to this device.
|
||||
* @type: as defined by @coresight_dev_type.
|
||||
* @subtype: as defined by @coresight_dev_subtype.
|
||||
* @ops: generic operations for this component, as defined
|
||||
@@ -159,9 +155,7 @@ struct coresight_connection {
|
||||
* @ea: Device attribute for sink representation under PMU directory.
|
||||
*/
|
||||
struct coresight_device {
|
||||
struct coresight_connection *conns;
|
||||
int nr_inport;
|
||||
int nr_outport;
|
||||
struct coresight_platform_data *pdata;
|
||||
enum coresight_dev_type type;
|
||||
union coresight_dev_subtype subtype;
|
||||
const struct coresight_ops *ops;
|
||||
@@ -174,6 +168,28 @@ struct coresight_device {
|
||||
struct dev_ext_attribute *ea;
|
||||
};
|
||||
|
||||
/*
|
||||
* coresight_dev_list - Mapping for devices to "name" index for device
|
||||
* names.
|
||||
*
|
||||
* @nr_idx: Number of entries already allocated.
|
||||
* @pfx: Prefix pattern for device name.
|
||||
* @fwnode_list: Array of fwnode_handles associated with each allocated
|
||||
* index, upto nr_idx entries.
|
||||
*/
|
||||
struct coresight_dev_list {
|
||||
int nr_idx;
|
||||
const char *pfx;
|
||||
struct fwnode_handle **fwnode_list;
|
||||
};
|
||||
|
||||
#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \
|
||||
static struct coresight_dev_list (var) = { \
|
||||
.pfx = dev_pfx, \
|
||||
.nr_idx = 0, \
|
||||
.fwnode_list = NULL, \
|
||||
}
|
||||
|
||||
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
|
||||
|
||||
#define source_ops(csdev) csdev->ops->source_ops
|
||||
@@ -267,7 +283,8 @@ extern int coresight_claim_device_unlocked(void __iomem *base);
|
||||
|
||||
extern void coresight_disclaim_device(void __iomem *base);
|
||||
extern void coresight_disclaim_device_unlocked(void __iomem *base);
|
||||
|
||||
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
|
||||
struct device *dev);
|
||||
#else
|
||||
static inline struct coresight_device *
|
||||
coresight_register(struct coresight_desc *desc) { return NULL; }
|
||||
@@ -292,16 +309,8 @@ static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
extern int of_coresight_get_cpu(const struct device_node *node);
|
||||
extern struct coresight_platform_data *
|
||||
of_get_coresight_platform_data(struct device *dev,
|
||||
const struct device_node *node);
|
||||
#else
|
||||
static inline int of_coresight_get_cpu(const struct device_node *node)
|
||||
{ return 0; }
|
||||
static inline struct coresight_platform_data *of_get_coresight_platform_data(
|
||||
struct device *dev, const struct device_node *node) { return NULL; }
|
||||
#endif
|
||||
extern int coresight_get_cpu(struct device *dev);
|
||||
|
||||
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
|
||||
|
||||
#endif
|
||||
|
@@ -1,13 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
#ifndef __LINUX_CPU_RMAP_H
|
||||
#define __LINUX_CPU_RMAP_H
|
||||
|
||||
/*
|
||||
* cpu_rmap.c: CPU affinity reverse-map support
|
||||
* Copyright 2011 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/cpumask.h>
|
||||
|
@@ -1,9 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_CPUFEATURE_H
|
||||
|
@@ -1,12 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* linux/include/linux/cpufreq.h
|
||||
*
|
||||
* Copyright (C) 2001 Russell King
|
||||
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef _LINUX_CPUFREQ_H
|
||||
#define _LINUX_CPUFREQ_H
|
||||
@@ -50,11 +47,6 @@ struct cpufreq_cpuinfo {
|
||||
unsigned int transition_latency;
|
||||
};
|
||||
|
||||
struct cpufreq_user_policy {
|
||||
unsigned int min; /* in kHz */
|
||||
unsigned int max; /* in kHz */
|
||||
};
|
||||
|
||||
struct cpufreq_policy {
|
||||
/* CPUs sharing clock, require sw coordination */
|
||||
cpumask_var_t cpus; /* Online CPUs only */
|
||||
@@ -84,7 +76,8 @@ struct cpufreq_policy {
|
||||
struct work_struct update; /* if update_policy() needs to be
|
||||
* called, but you're in IRQ context */
|
||||
|
||||
struct cpufreq_user_policy user_policy;
|
||||
struct dev_pm_qos_request *min_freq_req;
|
||||
struct dev_pm_qos_request *max_freq_req;
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
enum cpufreq_table_sorting freq_table_sorted;
|
||||
|
||||
@@ -147,6 +140,9 @@ struct cpufreq_policy {
|
||||
|
||||
/* Pointer to the cooling device if used for thermal mitigation */
|
||||
struct thermal_cooling_device *cdev;
|
||||
|
||||
struct notifier_block nb_min;
|
||||
struct notifier_block nb_max;
|
||||
};
|
||||
|
||||
struct cpufreq_freqs {
|
||||
@@ -204,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy);
|
||||
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
|
||||
int cpufreq_set_policy(struct cpufreq_policy *policy,
|
||||
struct cpufreq_policy *new_policy);
|
||||
void refresh_frequency_limits(struct cpufreq_policy *policy);
|
||||
void cpufreq_update_policy(unsigned int cpu);
|
||||
void cpufreq_update_limits(unsigned int cpu);
|
||||
bool have_governor_per_policy(void);
|
||||
@@ -409,6 +406,12 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
|
||||
const char *cpufreq_get_current_driver(void);
|
||||
void *cpufreq_get_driver_data(void);
|
||||
|
||||
static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_CPU_THERMAL) &&
|
||||
(drv->flags & CPUFREQ_IS_COOLING_DEV);
|
||||
}
|
||||
|
||||
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
|
||||
unsigned int min, unsigned int max)
|
||||
{
|
||||
@@ -989,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[];
|
||||
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
|
||||
|
||||
unsigned int cpufreq_generic_get(unsigned int cpu);
|
||||
int cpufreq_generic_init(struct cpufreq_policy *policy,
|
||||
void cpufreq_generic_init(struct cpufreq_policy *policy,
|
||||
struct cpufreq_frequency_table *table,
|
||||
unsigned int transition_latency);
|
||||
#endif /* _LINUX_CPUFREQ_H */
|
||||
|
@@ -116,10 +116,10 @@ enum cpuhp_state {
|
||||
CPUHP_AP_PERF_ARM_ACPI_STARTING,
|
||||
CPUHP_AP_PERF_ARM_STARTING,
|
||||
CPUHP_AP_ARM_L2X0_STARTING,
|
||||
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
|
||||
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
|
||||
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
|
||||
CPUHP_AP_JCORE_TIMER_STARTING,
|
||||
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
|
||||
CPUHP_AP_ARM_TWD_STARTING,
|
||||
CPUHP_AP_QCOM_TIMER_STARTING,
|
||||
CPUHP_AP_TEGRA_TIMER_STARTING,
|
||||
@@ -176,6 +176,7 @@ enum cpuhp_state {
|
||||
CPUHP_AP_WATCHDOG_ONLINE,
|
||||
CPUHP_AP_WORKQUEUE_ONLINE,
|
||||
CPUHP_AP_RCUTREE_ONLINE,
|
||||
CPUHP_AP_BASE_CACHEINFO_ONLINE,
|
||||
CPUHP_AP_ONLINE_DYN,
|
||||
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
|
||||
CPUHP_AP_X86_HPET_ONLINE,
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* crc-itu-t.h - CRC ITU-T V.41 routine
|
||||
*
|
||||
@@ -5,9 +6,6 @@
|
||||
* Width 16
|
||||
* Poly 0x1021 (x^16 + x^12 + x^15 + 1)
|
||||
* Init 0
|
||||
*
|
||||
* This source code is licensed under the GNU General Public License,
|
||||
* Version 2. See the file COPYING for more details.
|
||||
*/
|
||||
|
||||
#ifndef CRC_ITU_T_H
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* crc16.h - CRC-16 routine
|
||||
*
|
||||
@@ -7,9 +8,6 @@
|
||||
* Init 0
|
||||
*
|
||||
* Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
|
||||
*
|
||||
* This source code is licensed under the GNU General Public License,
|
||||
* Version 2. See the file COPYING for more details.
|
||||
*/
|
||||
|
||||
#ifndef __CRC16_H
|
||||
|
@@ -49,7 +49,6 @@
|
||||
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
|
||||
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
|
||||
#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
|
||||
#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
|
||||
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
|
||||
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
|
||||
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
|
||||
@@ -323,6 +322,17 @@ struct cipher_alg {
|
||||
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct compress_alg - compression/decompression algorithm
|
||||
* @coa_compress: Compress a buffer of specified length, storing the resulting
|
||||
* data in the specified buffer. Return the length of the
|
||||
* compressed data in dlen.
|
||||
* @coa_decompress: Decompress the source buffer, storing the uncompressed
|
||||
* data in the specified buffer. The length of the data is
|
||||
* returned in dlen.
|
||||
*
|
||||
* All fields are mandatory.
|
||||
*/
|
||||
struct compress_alg {
|
||||
int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
|
||||
unsigned int slen, u8 *dst, unsigned int *dlen);
|
||||
|
@@ -7,6 +7,9 @@
|
||||
#include <linux/radix-tree.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
/* Flag for synchronous flush */
|
||||
#define DAXDEV_F_SYNC (1UL << 0)
|
||||
|
||||
typedef unsigned long dax_entry_t;
|
||||
|
||||
struct iomap_ops;
|
||||
@@ -38,18 +41,40 @@ extern struct attribute_group dax_attribute_group;
|
||||
#if IS_ENABLED(CONFIG_DAX)
|
||||
struct dax_device *dax_get_by_host(const char *host);
|
||||
struct dax_device *alloc_dax(void *private, const char *host,
|
||||
const struct dax_operations *ops);
|
||||
const struct dax_operations *ops, unsigned long flags);
|
||||
void put_dax(struct dax_device *dax_dev);
|
||||
void kill_dax(struct dax_device *dax_dev);
|
||||
void dax_write_cache(struct dax_device *dax_dev, bool wc);
|
||||
bool dax_write_cache_enabled(struct dax_device *dax_dev);
|
||||
bool __dax_synchronous(struct dax_device *dax_dev);
|
||||
static inline bool dax_synchronous(struct dax_device *dax_dev)
|
||||
{
|
||||
return __dax_synchronous(dax_dev);
|
||||
}
|
||||
void __set_dax_synchronous(struct dax_device *dax_dev);
|
||||
static inline void set_dax_synchronous(struct dax_device *dax_dev)
|
||||
{
|
||||
__set_dax_synchronous(dax_dev);
|
||||
}
|
||||
/*
|
||||
* Check if given mapping is supported by the file / underlying device.
|
||||
*/
|
||||
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
|
||||
struct dax_device *dax_dev)
|
||||
{
|
||||
if (!(vma->vm_flags & VM_SYNC))
|
||||
return true;
|
||||
if (!IS_DAX(file_inode(vma->vm_file)))
|
||||
return false;
|
||||
return dax_synchronous(dax_dev);
|
||||
}
|
||||
#else
|
||||
static inline struct dax_device *dax_get_by_host(const char *host)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
static inline struct dax_device *alloc_dax(void *private, const char *host,
|
||||
const struct dax_operations *ops)
|
||||
const struct dax_operations *ops, unsigned long flags)
|
||||
{
|
||||
/*
|
||||
* Callers should check IS_ENABLED(CONFIG_DAX) to know if this
|
||||
@@ -70,6 +95,18 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline bool dax_synchronous(struct dax_device *dax_dev)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
static inline void set_dax_synchronous(struct dax_device *dax_dev)
|
||||
{
|
||||
}
|
||||
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
|
||||
struct dax_device *dax_dev)
|
||||
{
|
||||
return !(vma->vm_flags & VM_SYNC);
|
||||
}
|
||||
#endif
|
||||
|
||||
struct writeback_control;
|
||||
|
@@ -153,7 +153,7 @@ struct dentry_operations {
|
||||
* Locking rules for dentry_operations callbacks are to be found in
|
||||
* Documentation/filesystems/Locking. Keep it updated!
|
||||
*
|
||||
* FUrther descriptions are found in Documentation/filesystems/vfs.txt.
|
||||
* FUrther descriptions are found in Documentation/filesystems/vfs.rst.
|
||||
* Keep it updated too!
|
||||
*/
|
||||
|
||||
@@ -291,7 +291,6 @@ static inline unsigned d_count(const struct dentry *dentry)
|
||||
*/
|
||||
extern __printf(4, 5)
|
||||
char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
|
||||
extern char *simple_dname(struct dentry *, char *, int);
|
||||
|
||||
extern char *__d_path(const struct path *, const struct path *, char *, int);
|
||||
extern char *d_absolute_path(const struct path *, char *, int);
|
||||
@@ -568,7 +567,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
|
||||
* If dentry is on a union/overlay, then return the underlying, real dentry.
|
||||
* Otherwise return the dentry itself.
|
||||
*
|
||||
* See also: Documentation/filesystems/vfs.txt
|
||||
* See also: Documentation/filesystems/vfs.rst
|
||||
*/
|
||||
static inline struct dentry *d_real(struct dentry *dentry,
|
||||
const struct inode *inode)
|
||||
|
@@ -133,9 +133,8 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
|
||||
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
|
||||
int nregs, void __iomem *base, char *prefix);
|
||||
|
||||
struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u32 *array, u32 elements);
|
||||
void debugfs_create_u32_array(const char *name, umode_t mode,
|
||||
struct dentry *parent, u32 *array, u32 elements);
|
||||
|
||||
struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
|
||||
struct dentry *parent,
|
||||
@@ -353,11 +352,10 @@ static inline bool debugfs_initialized(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u32 *array, u32 elements)
|
||||
static inline void debugfs_create_u32_array(const char *name, umode_t mode,
|
||||
struct dentry *parent, u32 *array,
|
||||
u32 elements)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
|
||||
|
@@ -1,12 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* devfreq-event: a framework to provide raw data and events of devfreq devices
|
||||
*
|
||||
* Copyright (C) 2014 Samsung Electronics
|
||||
* Author: Chanwoo Choi <cw00.choi@samsung.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_DEVFREQ_EVENT_H__
|
||||
|
@@ -1,13 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
|
||||
* for Non-CPU Devices.
|
||||
*
|
||||
* Copyright (C) 2011 Samsung Electronics
|
||||
* MyungJoo Ham <myungjoo.ham@samsung.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_DEVFREQ_H__
|
||||
|
@@ -95,8 +95,7 @@ typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **
|
||||
|
||||
typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
|
||||
struct blk_zone *zones,
|
||||
unsigned int *nr_zones,
|
||||
gfp_t gfp_mask);
|
||||
unsigned int *nr_zones);
|
||||
|
||||
/*
|
||||
* These iteration functions are typically used to check (and combine)
|
||||
@@ -530,29 +529,20 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
|
||||
*---------------------------------------------------------------*/
|
||||
#define DM_NAME "device-mapper"
|
||||
|
||||
#define DM_RATELIMIT(pr_func, fmt, ...) \
|
||||
do { \
|
||||
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
|
||||
DEFAULT_RATELIMIT_BURST); \
|
||||
\
|
||||
if (__ratelimit(&rs)) \
|
||||
pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
|
||||
|
||||
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
|
||||
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
|
||||
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
|
||||
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
|
||||
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
|
||||
#ifdef CONFIG_DM_DEBUG
|
||||
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
|
||||
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
|
||||
#else
|
||||
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
|
||||
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
|
||||
|
@@ -6,7 +6,7 @@
|
||||
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
|
||||
* Copyright (c) 2008-2009 Novell Inc.
|
||||
*
|
||||
* See Documentation/driver-model/ for more information.
|
||||
* See Documentation/driver-api/driver-model/ for more information.
|
||||
*/
|
||||
|
||||
#ifndef _DEVICE_H_
|
||||
@@ -42,6 +42,7 @@ struct iommu_ops;
|
||||
struct iommu_group;
|
||||
struct iommu_fwspec;
|
||||
struct dev_pin_info;
|
||||
struct iommu_param;
|
||||
|
||||
struct bus_attribute {
|
||||
struct attribute attr;
|
||||
@@ -163,11 +164,13 @@ void subsys_dev_iter_init(struct subsys_dev_iter *iter,
|
||||
struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
|
||||
void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
|
||||
|
||||
int device_match_of_node(struct device *dev, const void *np);
|
||||
|
||||
int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
|
||||
int (*fn)(struct device *dev, void *data));
|
||||
struct device *bus_find_device(struct bus_type *bus, struct device *start,
|
||||
void *data,
|
||||
int (*match)(struct device *dev, void *data));
|
||||
const void *data,
|
||||
int (*match)(struct device *dev, const void *data));
|
||||
struct device *bus_find_device_by_name(struct bus_type *bus,
|
||||
struct device *start,
|
||||
const char *name);
|
||||
@@ -336,11 +339,12 @@ extern int __must_check driver_for_each_device(struct device_driver *drv,
|
||||
int (*fn)(struct device *dev,
|
||||
void *));
|
||||
struct device *driver_find_device(struct device_driver *drv,
|
||||
struct device *start, void *data,
|
||||
int (*match)(struct device *dev, void *data));
|
||||
struct device *start, const void *data,
|
||||
int (*match)(struct device *dev, const void *data));
|
||||
|
||||
void driver_deferred_probe_add(struct device *dev);
|
||||
int driver_deferred_probe_check_state(struct device *dev);
|
||||
int driver_deferred_probe_check_state_continue(struct device *dev);
|
||||
|
||||
/**
|
||||
* struct subsys_interface - interfaces to device functions
|
||||
@@ -704,7 +708,8 @@ extern unsigned long devm_get_free_pages(struct device *dev,
|
||||
gfp_t gfp_mask, unsigned int order);
|
||||
extern void devm_free_pages(struct device *dev, unsigned long addr);
|
||||
|
||||
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
|
||||
void __iomem *devm_ioremap_resource(struct device *dev,
|
||||
const struct resource *res);
|
||||
|
||||
void __iomem *devm_of_iomap(struct device *dev,
|
||||
struct device_node *node, int index,
|
||||
@@ -960,6 +965,7 @@ struct dev_links_info {
|
||||
* device (i.e. the bus driver that discovered the device).
|
||||
* @iommu_group: IOMMU group the device belongs to.
|
||||
* @iommu_fwspec: IOMMU-specific properties supplied by firmware.
|
||||
* @iommu_param: Per device generic IOMMU runtime data
|
||||
*
|
||||
* @offline_disabled: If set, the device is permanently online.
|
||||
* @offline: Set after successful invocation of bus type's .offline().
|
||||
@@ -1053,6 +1059,7 @@ struct device {
|
||||
void (*release)(struct device *dev);
|
||||
struct iommu_group *iommu_group;
|
||||
struct iommu_fwspec *iommu_fwspec;
|
||||
struct iommu_param *iommu_param;
|
||||
|
||||
bool offline_disabled:1;
|
||||
bool offline:1;
|
||||
@@ -1251,6 +1258,8 @@ extern int device_for_each_child_reverse(struct device *dev, void *data,
|
||||
int (*fn)(struct device *dev, void *data));
|
||||
extern struct device *device_find_child(struct device *dev, void *data,
|
||||
int (*match)(struct device *dev, void *data));
|
||||
extern struct device *device_find_child_by_name(struct device *parent,
|
||||
const char *name);
|
||||
extern int device_rename(struct device *dev, const char *new_name);
|
||||
extern int device_move(struct device *dev, struct device *new_parent,
|
||||
enum dpm_order dpm_order);
|
||||
|
389
include/linux/dim.h
Normal file
389
include/linux/dim.h
Normal file
@@ -0,0 +1,389 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/* Copyright (c) 2019 Mellanox Technologies. */
|
||||
|
||||
#ifndef DIM_H
|
||||
#define DIM_H
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
/**
|
||||
* Number of events between DIM iterations.
|
||||
* Causes a moderation of the algorithm run.
|
||||
*/
|
||||
#define DIM_NEVENTS 64
|
||||
|
||||
/**
|
||||
* Is a difference between values justifies taking an action.
|
||||
* We consider 10% difference as significant.
|
||||
*/
|
||||
#define IS_SIGNIFICANT_DIFF(val, ref) \
|
||||
(((100UL * abs((val) - (ref))) / (ref)) > 10)
|
||||
|
||||
/**
|
||||
* Calculate the gap between two values.
|
||||
* Take wrap-around and variable size into consideration.
|
||||
*/
|
||||
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \
|
||||
& (BIT_ULL(bits) - 1))
|
||||
|
||||
/**
|
||||
* Structure for CQ moderation values.
|
||||
* Used for communications between DIM and its consumer.
|
||||
*
|
||||
* @usec: CQ timer suggestion (by DIM)
|
||||
* @pkts: CQ packet counter suggestion (by DIM)
|
||||
* @cq_period_mode: CQ priod count mode (from CQE/EQE)
|
||||
*/
|
||||
struct dim_cq_moder {
|
||||
u16 usec;
|
||||
u16 pkts;
|
||||
u16 comps;
|
||||
u8 cq_period_mode;
|
||||
};
|
||||
|
||||
/**
|
||||
* Structure for DIM sample data.
|
||||
* Used for communications between DIM and its consumer.
|
||||
*
|
||||
* @time: Sample timestamp
|
||||
* @pkt_ctr: Number of packets
|
||||
* @byte_ctr: Number of bytes
|
||||
* @event_ctr: Number of events
|
||||
*/
|
||||
struct dim_sample {
|
||||
ktime_t time;
|
||||
u32 pkt_ctr;
|
||||
u32 byte_ctr;
|
||||
u16 event_ctr;
|
||||
u32 comp_ctr;
|
||||
};
|
||||
|
||||
/**
|
||||
* Structure for DIM stats.
|
||||
* Used for holding current measured rates.
|
||||
*
|
||||
* @ppms: Packets per msec
|
||||
* @bpms: Bytes per msec
|
||||
* @epms: Events per msec
|
||||
*/
|
||||
struct dim_stats {
|
||||
int ppms; /* packets per msec */
|
||||
int bpms; /* bytes per msec */
|
||||
int epms; /* events per msec */
|
||||
int cpms; /* completions per msec */
|
||||
int cpe_ratio; /* ratio of completions to events */
|
||||
};
|
||||
|
||||
/**
|
||||
* Main structure for dynamic interrupt moderation (DIM).
|
||||
* Used for holding all information about a specific DIM instance.
|
||||
*
|
||||
* @state: Algorithm state (see below)
|
||||
* @prev_stats: Measured rates from previous iteration (for comparison)
|
||||
* @start_sample: Sampled data at start of current iteration
|
||||
* @work: Work to perform on action required
|
||||
* @priv: A pointer to the struct that points to dim
|
||||
* @profile_ix: Current moderation profile
|
||||
* @mode: CQ period count mode
|
||||
* @tune_state: Algorithm tuning state (see below)
|
||||
* @steps_right: Number of steps taken towards higher moderation
|
||||
* @steps_left: Number of steps taken towards lower moderation
|
||||
* @tired: Parking depth counter
|
||||
*/
|
||||
struct dim {
|
||||
u8 state;
|
||||
struct dim_stats prev_stats;
|
||||
struct dim_sample start_sample;
|
||||
struct dim_sample measuring_sample;
|
||||
struct work_struct work;
|
||||
void *priv;
|
||||
u8 profile_ix;
|
||||
u8 mode;
|
||||
u8 tune_state;
|
||||
u8 steps_right;
|
||||
u8 steps_left;
|
||||
u8 tired;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dim_cq_period_mode
|
||||
*
|
||||
* These are the modes for CQ period count.
|
||||
*
|
||||
* @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE
|
||||
* @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset)
|
||||
* @DIM_CQ_PERIOD_NUM_MODES: Number of modes
|
||||
*/
|
||||
enum {
|
||||
DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
|
||||
DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
|
||||
DIM_CQ_PERIOD_NUM_MODES
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dim_state
|
||||
*
|
||||
* These are the DIM algorithm states.
|
||||
* These will determine if the algorithm is in a valid state to start an iteration.
|
||||
*
|
||||
* @DIM_START_MEASURE: This is the first iteration (also after applying a new profile)
|
||||
* @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if
|
||||
* need to perform an action
|
||||
* @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure
|
||||
*/
|
||||
enum {
|
||||
DIM_START_MEASURE,
|
||||
DIM_MEASURE_IN_PROGRESS,
|
||||
DIM_APPLY_NEW_PROFILE,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dim_tune_state
|
||||
*
|
||||
* These are the DIM algorithm tune states.
|
||||
* These will determine which action the algorithm should perform.
|
||||
*
|
||||
* @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference
|
||||
* @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0
|
||||
* @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels
|
||||
* @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels
|
||||
*/
|
||||
enum {
|
||||
DIM_PARKING_ON_TOP,
|
||||
DIM_PARKING_TIRED,
|
||||
DIM_GOING_RIGHT,
|
||||
DIM_GOING_LEFT,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dim_stats_state
|
||||
*
|
||||
* These are the DIM algorithm statistics states.
|
||||
* These will determine the verdict of current iteration.
|
||||
*
|
||||
* @DIM_STATS_WORSE: Current iteration shows worse performance than before
|
||||
* @DIM_STATS_WORSE: Current iteration shows same performance than before
|
||||
* @DIM_STATS_WORSE: Current iteration shows better performance than before
|
||||
*/
|
||||
enum {
|
||||
DIM_STATS_WORSE,
|
||||
DIM_STATS_SAME,
|
||||
DIM_STATS_BETTER,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dim_step_result
|
||||
*
|
||||
* These are the DIM algorithm step results.
|
||||
* These describe the result of a step.
|
||||
*
|
||||
* @DIM_STEPPED: Performed a regular step
|
||||
* @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to
|
||||
* tired parking
|
||||
* @DIM_ON_EDGE: Stepped to the most left/right profile
|
||||
*/
|
||||
enum {
|
||||
DIM_STEPPED,
|
||||
DIM_TOO_TIRED,
|
||||
DIM_ON_EDGE,
|
||||
};
|
||||
|
||||
/**
|
||||
* dim_on_top - check if current state is a good place to stop (top location)
|
||||
* @dim: DIM context
|
||||
*
|
||||
* Check if current profile is a good place to park at.
|
||||
* This will result in reducing the DIM checks frequency as we assume we
|
||||
* shouldn't probably change profiles, unless traffic pattern wasn't changed.
|
||||
*/
|
||||
bool dim_on_top(struct dim *dim);
|
||||
|
||||
/**
|
||||
* dim_turn - change profile alterning direction
|
||||
* @dim: DIM context
|
||||
*
|
||||
* Go left if we were going right and vice-versa.
|
||||
* Do nothing if currently parking.
|
||||
*/
|
||||
void dim_turn(struct dim *dim);
|
||||
|
||||
/**
|
||||
* dim_park_on_top - enter a parking state on a top location
|
||||
* @dim: DIM context
|
||||
*
|
||||
* Enter parking state.
|
||||
* Clear all movement history.
|
||||
*/
|
||||
void dim_park_on_top(struct dim *dim);
|
||||
|
||||
/**
|
||||
* dim_park_tired - enter a tired parking state
|
||||
* @dim: DIM context
|
||||
*
|
||||
* Enter parking state.
|
||||
* Clear all movement history and cause DIM checks frequency to reduce.
|
||||
*/
|
||||
void dim_park_tired(struct dim *dim);
|
||||
|
||||
/**
|
||||
* dim_calc_stats - calculate the difference between two samples
|
||||
* @start: start sample
|
||||
* @end: end sample
|
||||
* @curr_stats: delta between samples
|
||||
*
|
||||
* Calculate the delta between two samples (in data rates).
|
||||
* Takes into consideration counter wrap-around.
|
||||
*/
|
||||
void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
|
||||
struct dim_stats *curr_stats);
|
||||
|
||||
/**
|
||||
* dim_update_sample - set a sample's fields with give values
|
||||
* @event_ctr: number of events to set
|
||||
* @packets: number of packets to set
|
||||
* @bytes: number of bytes to set
|
||||
* @s: DIM sample
|
||||
*/
|
||||
static inline void
|
||||
dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s)
|
||||
{
|
||||
s->time = ktime_get();
|
||||
s->pkt_ctr = packets;
|
||||
s->byte_ctr = bytes;
|
||||
s->event_ctr = event_ctr;
|
||||
}
|
||||
|
||||
/**
|
||||
* dim_update_sample_with_comps - set a sample's fields with given
|
||||
* values including the completion parameter
|
||||
* @event_ctr: number of events to set
|
||||
* @packets: number of packets to set
|
||||
* @bytes: number of bytes to set
|
||||
* @comps: number of completions to set
|
||||
* @s: DIM sample
|
||||
*/
|
||||
static inline void
|
||||
dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
|
||||
struct dim_sample *s)
|
||||
{
|
||||
dim_update_sample(event_ctr, packets, bytes, s);
|
||||
s->comp_ctr = comps;
|
||||
}
|
||||
|
||||
/* Net DIM */
|
||||
|
||||
/*
|
||||
* Net DIM profiles:
|
||||
* There are different set of profiles for each CQ period mode.
|
||||
* There are different set of profiles for RX/TX CQs.
|
||||
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
|
||||
*/
|
||||
#define NET_DIM_PARAMS_NUM_PROFILES 5
|
||||
#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
|
||||
#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
|
||||
#define NET_DIM_DEF_PROFILE_CQE 1
|
||||
#define NET_DIM_DEF_PROFILE_EQE 1
|
||||
|
||||
#define NET_DIM_RX_EQE_PROFILES { \
|
||||
{1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
}
|
||||
|
||||
#define NET_DIM_RX_CQE_PROFILES { \
|
||||
{2, 256}, \
|
||||
{8, 128}, \
|
||||
{16, 64}, \
|
||||
{32, 64}, \
|
||||
{64, 64} \
|
||||
}
|
||||
|
||||
#define NET_DIM_TX_EQE_PROFILES { \
|
||||
{1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
|
||||
{128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
|
||||
}
|
||||
|
||||
#define NET_DIM_TX_CQE_PROFILES { \
|
||||
{5, 128}, \
|
||||
{8, 64}, \
|
||||
{16, 32}, \
|
||||
{32, 32}, \
|
||||
{64, 32} \
|
||||
}
|
||||
|
||||
static const struct dim_cq_moder
|
||||
rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
|
||||
NET_DIM_RX_EQE_PROFILES,
|
||||
NET_DIM_RX_CQE_PROFILES,
|
||||
};
|
||||
|
||||
static const struct dim_cq_moder
|
||||
tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
|
||||
NET_DIM_TX_EQE_PROFILES,
|
||||
NET_DIM_TX_CQE_PROFILES,
|
||||
};
|
||||
|
||||
/**
|
||||
* net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
|
||||
* @cq_period_mode: CQ period mode
|
||||
* @ix: Profile index
|
||||
*/
|
||||
struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix);
|
||||
|
||||
/**
|
||||
* net_dim_get_def_rx_moderation - provide the default RX moderation
|
||||
* @cq_period_mode: CQ period mode
|
||||
*/
|
||||
struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode);
|
||||
|
||||
/**
|
||||
* net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile
|
||||
* @cq_period_mode: CQ period mode
|
||||
* @ix: Profile index
|
||||
*/
|
||||
struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix);
|
||||
|
||||
/**
|
||||
* net_dim_get_def_tx_moderation - provide the default TX moderation
|
||||
* @cq_period_mode: CQ period mode
|
||||
*/
|
||||
struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
|
||||
|
||||
/**
|
||||
* net_dim - main DIM algorithm entry point
|
||||
* @dim: DIM instance information
|
||||
* @end_sample: Current data measurement
|
||||
*
|
||||
* Called by the consumer.
|
||||
* This is the main logic of the algorithm, where data is processed in order to decide on next
|
||||
* required action.
|
||||
*/
|
||||
void net_dim(struct dim *dim, struct dim_sample end_sample);
|
||||
|
||||
/* RDMA DIM */
|
||||
|
||||
/*
|
||||
* RDMA DIM profile:
|
||||
* profile size must be of RDMA_DIM_PARAMS_NUM_PROFILES.
|
||||
*/
|
||||
#define RDMA_DIM_PARAMS_NUM_PROFILES 9
|
||||
#define RDMA_DIM_START_PROFILE 0
|
||||
|
||||
/**
|
||||
* rdma_dim - Runs the adaptive moderation.
|
||||
* @dim: The moderation struct.
|
||||
* @completions: The number of completions collected in this round.
|
||||
*
|
||||
* Each call to rdma_dim takes the latest amount of completions that
|
||||
* have been collected and counts them as a new event.
|
||||
* Once enough events have been collected the algorithm decides a new
|
||||
* moderation level.
|
||||
*/
|
||||
void rdma_dim(struct dim *dim, u64 completions);
|
||||
|
||||
#endif /* DIM_H */
|
@@ -1,14 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/* include/linux/dm9000.h
|
||||
*
|
||||
* Copyright (c) 2004 Simtec Electronics
|
||||
* Ben Dooks <ben@simtec.co.uk>
|
||||
*
|
||||
* Header file for dm9000 platform data
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DM9000_PLATFORM_DATA
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Header file for dma buffer sharing framework.
|
||||
*
|
||||
@@ -8,18 +9,6 @@
|
||||
* Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
|
||||
* Daniel Vetter <daniel@ffwll.ch> for their support in creation and
|
||||
* refining of this idea.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __DMA_BUF_H__
|
||||
#define __DMA_BUF_H__
|
||||
|
@@ -50,6 +50,7 @@
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
struct cma;
|
||||
struct page;
|
||||
@@ -111,6 +112,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
|
||||
unsigned int order, bool no_warn);
|
||||
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
|
||||
int count);
|
||||
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
|
||||
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
|
||||
|
||||
#else
|
||||
|
||||
@@ -153,6 +156,22 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
|
||||
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
|
||||
gfp_t gfp)
|
||||
{
|
||||
int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
|
||||
size_t align = get_order(PAGE_ALIGN(size));
|
||||
|
||||
return alloc_pages_node(node, gfp, align);
|
||||
}
|
||||
|
||||
static inline void dma_free_contiguous(struct device *dev, struct page *page,
|
||||
size_t size)
|
||||
{
|
||||
__free_pages(page, get_order(size));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
||||
}
|
||||
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
|
||||
bool force_dma_unencrypted(struct device *dev);
|
||||
#else
|
||||
static inline bool force_dma_unencrypted(struct device *dev)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
|
||||
|
||||
/*
|
||||
* If memory encryption is supported, phys_to_dma will set the memory encryption
|
||||
* bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
|
||||
|
@@ -1,74 +1,25 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2014-2015 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __DMA_IOMMU_H
|
||||
#define __DMA_IOMMU_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/errno.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/errno.h>
|
||||
|
||||
#ifdef CONFIG_IOMMU_DMA
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/msi.h>
|
||||
|
||||
int iommu_dma_init(void);
|
||||
|
||||
/* Domain management interface for IOMMU drivers */
|
||||
int iommu_get_dma_cookie(struct iommu_domain *domain);
|
||||
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
|
||||
void iommu_put_dma_cookie(struct iommu_domain *domain);
|
||||
|
||||
/* Setup call for arch DMA mapping code */
|
||||
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
u64 size, struct device *dev);
|
||||
|
||||
/* General helpers for DMA-API <-> IOMMU-API interaction */
|
||||
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
|
||||
unsigned long attrs);
|
||||
|
||||
/*
|
||||
* These implement the bulk of the relevant DMA mapping callbacks, but require
|
||||
* the arch code to take care of attributes and cache maintenance
|
||||
*/
|
||||
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
|
||||
unsigned long attrs, int prot, dma_addr_t *handle,
|
||||
void (*flush_page)(struct device *, const void *, phys_addr_t));
|
||||
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
|
||||
dma_addr_t *handle);
|
||||
|
||||
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
|
||||
|
||||
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, int prot);
|
||||
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, int prot);
|
||||
|
||||
/*
|
||||
* Arch code with no special attribute handling may use these
|
||||
* directly as DMA mapping callbacks for simplicity
|
||||
*/
|
||||
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
|
||||
|
||||
/* The DMA API isn't _quite_ the whole story, though... */
|
||||
/*
|
||||
@@ -86,16 +37,16 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
|
||||
|
||||
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
|
||||
|
||||
#else
|
||||
#else /* CONFIG_IOMMU_DMA */
|
||||
|
||||
struct iommu_domain;
|
||||
struct msi_desc;
|
||||
struct msi_msg;
|
||||
struct device;
|
||||
|
||||
static inline int iommu_dma_init(void)
|
||||
static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
|
||||
u64 size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
|
||||
@@ -128,5 +79,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
|
||||
}
|
||||
|
||||
#endif /* CONFIG_IOMMU_DMA */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __DMA_IOMMU_H */
|
||||
|
@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
|
||||
return dma_set_mask_and_coherent(dev, mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_addressing_limited - return if the device is addressing limited
|
||||
* @dev: device to check
|
||||
*
|
||||
* Return %true if the devices DMA mask is too small to address all memory in
|
||||
* the system, else %false. Lack of addressing bits is the prime reason for
|
||||
* bounce buffering, but might not be the only one.
|
||||
*/
|
||||
static inline bool dma_addressing_limited(struct device *dev)
|
||||
{
|
||||
return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
|
||||
dma_get_required_mask(dev);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent);
|
||||
@@ -729,13 +743,6 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
#ifndef dma_max_pfn
|
||||
static inline unsigned long dma_max_pfn(struct device *dev)
|
||||
{
|
||||
return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline int dma_get_cache_alignment(void)
|
||||
{
|
||||
#ifdef ARCH_DMA_MINALIGN
|
||||
|
@@ -20,6 +20,22 @@ static inline bool dev_is_dma_coherent(struct device *dev)
|
||||
}
|
||||
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
|
||||
|
||||
/*
|
||||
* Check if an allocation needs to be marked uncached to be coherent.
|
||||
*/
|
||||
static __always_inline bool dma_alloc_need_uncached(struct device *dev,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return false;
|
||||
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
|
||||
return false;
|
||||
if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
|
||||
(attrs & DMA_ATTR_NON_CONSISTENT))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t gfp, unsigned long attrs);
|
||||
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
@@ -80,4 +96,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
|
||||
}
|
||||
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
|
||||
|
||||
void *uncached_kernel_address(void *addr);
|
||||
void *cached_kernel_address(void *addr);
|
||||
|
||||
#endif /* _LINUX_DMA_NONCOHERENT_H */
|
||||
|
47
include/linux/dma/edma.h
Normal file
47
include/linux/dma/edma.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
|
||||
* Synopsys DesignWare eDMA core driver
|
||||
*
|
||||
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
|
||||
*/
|
||||
|
||||
#ifndef _DW_EDMA_H
|
||||
#define _DW_EDMA_H
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/dmaengine.h>
|
||||
|
||||
struct dw_edma;
|
||||
|
||||
/**
|
||||
* struct dw_edma_chip - representation of DesignWare eDMA controller hardware
|
||||
* @dev: struct device of the eDMA controller
|
||||
* @id: instance ID
|
||||
* @irq: irq line
|
||||
* @dw: struct dw_edma that is filed by dw_edma_probe()
|
||||
*/
|
||||
struct dw_edma_chip {
|
||||
struct device *dev;
|
||||
int id;
|
||||
int irq;
|
||||
struct dw_edma *dw;
|
||||
};
|
||||
|
||||
/* Export to the platform drivers */
|
||||
#if IS_ENABLED(CONFIG_DW_EDMA)
|
||||
int dw_edma_probe(struct dw_edma_chip *chip);
|
||||
int dw_edma_remove(struct dw_edma_chip *chip);
|
||||
#else
|
||||
static inline int dw_edma_probe(struct dw_edma_chip *chip)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int dw_edma_remove(struct dw_edma_chip *chip)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_DW_EDMA */
|
||||
|
||||
#endif /* _DW_EDMA_H */
|
@@ -1,11 +1,8 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Driver for the High Speed UART DMA
|
||||
*
|
||||
* Copyright (C) 2015 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _DMA_HSU_H
|
||||
|
@@ -1,12 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2008
|
||||
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
|
||||
*
|
||||
* Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_DMA_IPU_DMA_H
|
||||
|
24
include/linux/dma/mxs-dma.h
Normal file
24
include/linux/dma/mxs-dma.h
Normal file
@@ -0,0 +1,24 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _MXS_DMA_H_
|
||||
#define _MXS_DMA_H_
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
|
||||
#define MXS_DMA_CTRL_WAIT4END BIT(31)
|
||||
#define MXS_DMA_CTRL_WAIT4RDY BIT(30)
|
||||
|
||||
/*
|
||||
* The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words
|
||||
* in the second argument to dmaengine_prep_slave_sg when the direction is
|
||||
* set to DMA_TRANS_NONE. To make this clear and to prevent users from doing
|
||||
* the error prone casting we have this wrapper function
|
||||
*/
|
||||
static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio(
|
||||
struct dma_chan *chan, u32 *pio, unsigned int npio,
|
||||
enum dma_transfer_direction dir, unsigned long flags)
|
||||
{
|
||||
return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio,
|
||||
dir, flags);
|
||||
}
|
||||
|
||||
#endif /* _MXS_DMA_H_ */
|
@@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
|
||||
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
|
||||
void dma_issue_pending_all(void);
|
||||
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
|
||||
dma_filter_fn fn, void *fn_param);
|
||||
dma_filter_fn fn, void *fn_param,
|
||||
struct device_node *np);
|
||||
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
|
||||
|
||||
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
|
||||
@@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void)
|
||||
{
|
||||
}
|
||||
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
|
||||
dma_filter_fn fn, void *fn_param)
|
||||
dma_filter_fn fn,
|
||||
void *fn_param,
|
||||
struct device_node *np)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
@@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device);
|
||||
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
|
||||
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
|
||||
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
|
||||
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
|
||||
#define dma_request_channel(mask, x, y) \
|
||||
__dma_request_channel(&(mask), x, y, NULL)
|
||||
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
|
||||
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
|
||||
|
||||
@@ -1417,6 +1421,6 @@ static inline struct dma_chan
|
||||
if (!fn || !fn_param)
|
||||
return NULL;
|
||||
|
||||
return __dma_request_channel(mask, fn, fn_param);
|
||||
return __dma_request_channel(mask, fn, fn_param, NULL);
|
||||
}
|
||||
#endif /* DMAENGINE_H */
|
||||
|
@@ -92,12 +92,14 @@ static inline bool dmar_rcu_check(void)
|
||||
|
||||
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
|
||||
|
||||
#define for_each_dev_scope(a, c, p, d) \
|
||||
for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
|
||||
NULL, (p) < (c)); (p)++)
|
||||
#define for_each_dev_scope(devs, cnt, i, tmp) \
|
||||
for ((i) = 0; ((tmp) = (i) < (cnt) ? \
|
||||
dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
|
||||
(i)++)
|
||||
|
||||
#define for_each_active_dev_scope(a, c, p, d) \
|
||||
for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else
|
||||
#define for_each_active_dev_scope(devs, cnt, i, tmp) \
|
||||
for_each_dev_scope((devs), (cnt), (i), (tmp)) \
|
||||
if (!(tmp)) { continue; } else
|
||||
|
||||
extern int dmar_table_init(void);
|
||||
extern int dmar_dev_scope_init(void);
|
||||
|
@@ -26,7 +26,8 @@
|
||||
|
||||
#include <uapi/linux/dns_resolver.h>
|
||||
|
||||
extern int dns_query(const char *type, const char *name, size_t namelen,
|
||||
struct net;
|
||||
extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen,
|
||||
const char *options, char **_result, time64_t *_expiry,
|
||||
bool invalidate);
|
||||
|
||||
|
@@ -20,9 +20,6 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
|
||||
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
|
||||
u16 tpid, u16 tci);
|
||||
|
||||
struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev,
|
||||
struct packet_type *pt, u16 *tpid, u16 *tci);
|
||||
|
||||
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
|
||||
|
||||
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
|
||||
@@ -31,6 +28,8 @@ int dsa_8021q_rx_switch_id(u16 vid);
|
||||
|
||||
int dsa_8021q_rx_source_port(u16 vid);
|
||||
|
||||
struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
|
||||
|
||||
#else
|
||||
|
||||
int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
|
||||
@@ -45,12 +44,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev,
|
||||
struct packet_type *pt, u16 *tpid, u16 *tci)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
|
||||
{
|
||||
return 0;
|
||||
@@ -71,6 +64,11 @@ int dsa_8021q_rx_source_port(u16 vid)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
|
||||
|
||||
#endif /* _NET_DSA_8021Q_H */
|
||||
|
@@ -12,6 +12,7 @@
|
||||
#include <net/dsa.h>
|
||||
|
||||
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
|
||||
#define ETH_P_SJA1105_META 0x0008
|
||||
|
||||
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
|
||||
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
|
||||
@@ -20,8 +21,41 @@
|
||||
#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
|
||||
#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
|
||||
|
||||
/* Source and Destination MAC of follow-up meta frames.
|
||||
* Whereas the choice of SMAC only affects the unique identification of the
|
||||
* switch as sender of meta frames, the DMAC must be an address that is present
|
||||
* in the DSA master port's multicast MAC filter.
|
||||
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
|
||||
* over L2 use this address for some purpose already.
|
||||
*/
|
||||
#define SJA1105_META_SMAC 0x222222222222ull
|
||||
#define SJA1105_META_DMAC 0x0180C200000Eull
|
||||
|
||||
/* Global tagger data: each struct sja1105_port has a reference to
|
||||
* the structure defined in struct sja1105_private.
|
||||
*/
|
||||
struct sja1105_tagger_data {
|
||||
struct sk_buff_head skb_rxtstamp_queue;
|
||||
struct work_struct rxtstamp_work;
|
||||
struct sk_buff *stampable_skb;
|
||||
/* Protects concurrent access to the meta state machine
|
||||
* from taggers running on multiple ports on SMP systems
|
||||
*/
|
||||
spinlock_t meta_lock;
|
||||
bool hwts_rx_en;
|
||||
};
|
||||
|
||||
struct sja1105_skb_cb {
|
||||
u32 meta_tstamp;
|
||||
};
|
||||
|
||||
#define SJA1105_SKB_CB(skb) \
|
||||
((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
|
||||
|
||||
struct sja1105_port {
|
||||
struct sja1105_tagger_data *data;
|
||||
struct dsa_port *dp;
|
||||
bool hwts_tx_en;
|
||||
int mgmt_slot;
|
||||
};
|
||||
|
||||
|
@@ -1,13 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* (C) Copyright 2009 Intel Corporation
|
||||
* Author: Jacob Pan (jacob.jun.pan@intel.com)
|
||||
*
|
||||
* Shared with ARM platforms, Jamie Iles, Picochip 2011
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Support for the Synopsys DesignWare APB Timers.
|
||||
*/
|
||||
#ifndef __DW_APB_TIMER_H__
|
||||
|
@@ -689,6 +689,7 @@ void efi_native_runtime_setup(void);
|
||||
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
|
||||
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
|
||||
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
|
||||
#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
|
||||
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
|
||||
|
||||
typedef struct {
|
||||
@@ -996,6 +997,7 @@ extern struct efi {
|
||||
unsigned long mem_attr_table; /* memory attributes table */
|
||||
unsigned long rng_seed; /* UEFI firmware random seed */
|
||||
unsigned long tpm_log; /* TPM2 Event Log table */
|
||||
unsigned long tpm_final_log; /* TPM2 Final Events Log table */
|
||||
unsigned long mem_reserve; /* Linux EFI memreserve table */
|
||||
efi_get_time_t *get_time;
|
||||
efi_set_time_t *set_time;
|
||||
@@ -1706,12 +1708,20 @@ struct linux_efi_random_seed {
|
||||
|
||||
struct linux_efi_tpm_eventlog {
|
||||
u32 size;
|
||||
u32 final_events_preboot_size;
|
||||
u8 version;
|
||||
u8 log[];
|
||||
};
|
||||
|
||||
extern int efi_tpm_eventlog_init(void);
|
||||
|
||||
struct efi_tcg2_final_events_table {
|
||||
u64 version;
|
||||
u64 nr_events;
|
||||
u8 events[];
|
||||
};
|
||||
extern int efi_tpm_final_log_size;
|
||||
|
||||
/*
|
||||
* efi_runtime_service() function identifiers.
|
||||
* "NONE" is used by efi_recover_from_page_fault() to check if the page
|
||||
|
@@ -34,7 +34,7 @@ struct elevator_mq_ops {
|
||||
void (*depth_updated)(struct blk_mq_hw_ctx *);
|
||||
|
||||
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
|
||||
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
|
||||
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
|
||||
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
|
||||
void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
|
||||
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
|
||||
@@ -75,7 +75,7 @@ struct elevator_type
|
||||
size_t icq_size; /* see iocontext.h */
|
||||
size_t icq_align; /* ditto */
|
||||
struct elv_fs_entry *elevator_attrs;
|
||||
char elevator_name[ELV_NAME_MAX];
|
||||
const char *elevator_name;
|
||||
const char *elevator_alias;
|
||||
struct module *elevator_owner;
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
@@ -160,15 +160,6 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
|
||||
#define ELEVATOR_INSERT_FLUSH 5
|
||||
#define ELEVATOR_INSERT_SORT_MERGE 6
|
||||
|
||||
/*
|
||||
* return values from elevator_may_queue_fn
|
||||
*/
|
||||
enum {
|
||||
ELV_MQUEUE_MAY,
|
||||
ELV_MQUEUE_NO,
|
||||
ELV_MQUEUE_MUST,
|
||||
};
|
||||
|
||||
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
|
||||
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
|
||||
|
||||
|
@@ -89,7 +89,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
|
||||
* like schedutil.
|
||||
*/
|
||||
cpu = cpumask_first(to_cpumask(pd->cpus));
|
||||
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
|
||||
scale_cpu = arch_scale_cpu_capacity(cpu);
|
||||
cs = &pd->table[pd->nr_cap_states - 1];
|
||||
freq = map_util_freq(max_util, cs->frequency, scale_cpu);
|
||||
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* include/linux/extcon/extcon-adc-jack.h
|
||||
*
|
||||
@@ -5,11 +6,6 @@
|
||||
*
|
||||
* Copyright (C) 2012 Samsung Electronics
|
||||
* MyungJoo Ham <myungjoo.ham@samsung.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _EXTCON_ADC_JACK_H_
|
||||
|
@@ -11,7 +11,7 @@
|
||||
|
||||
/*
|
||||
* For explanation of the elements of this struct, see
|
||||
* Documentation/fault-injection/fault-injection.txt
|
||||
* Documentation/fault-injection/fault-injection.rst
|
||||
*/
|
||||
struct fault_attr {
|
||||
unsigned long probability;
|
||||
|
@@ -1,3 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/* include/linux/fec.h
|
||||
*
|
||||
* Copyright (c) 2009 Orex Computed Radiography
|
||||
@@ -6,10 +7,6 @@
|
||||
* Copyright (C) 2010 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* Header file for the FEC platform data
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef __LINUX_FEC_H__
|
||||
#define __LINUX_FEC_H__
|
||||
|
@@ -160,6 +160,20 @@ struct ctl_table_header;
|
||||
.off = 0, \
|
||||
.imm = IMM })
|
||||
|
||||
/* Special form of mov32, used for doing explicit zero extension on dst. */
|
||||
#define BPF_ZEXT_REG(DST) \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_MOV | BPF_X, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = DST, \
|
||||
.off = 0, \
|
||||
.imm = 1 })
|
||||
|
||||
static inline bool insn_is_zext(const struct bpf_insn *insn)
|
||||
{
|
||||
return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
|
||||
}
|
||||
|
||||
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
|
||||
#define BPF_LD_IMM64(DST, IMM) \
|
||||
BPF_LD_IMM64_RAW(DST, 0, IMM)
|
||||
@@ -512,7 +526,8 @@ struct bpf_prog {
|
||||
blinded:1, /* Was blinded */
|
||||
is_func:1, /* program is a bpf function */
|
||||
kprobe_override:1, /* Do we override a kprobe? */
|
||||
has_callchain_buf:1; /* callchain buffer allocated? */
|
||||
has_callchain_buf:1, /* callchain buffer allocated? */
|
||||
enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
|
||||
enum bpf_prog_type type; /* Type of BPF program */
|
||||
enum bpf_attach_type expected_attach_type; /* For some prog types */
|
||||
u32 len; /* Number of filter blocks */
|
||||
@@ -563,8 +578,9 @@ struct bpf_skb_data_end {
|
||||
};
|
||||
|
||||
struct bpf_redirect_info {
|
||||
u32 ifindex;
|
||||
u32 flags;
|
||||
u32 tgt_index;
|
||||
void *tgt_value;
|
||||
struct bpf_map *map;
|
||||
struct bpf_map *map_to_flush;
|
||||
u32 kern_flags;
|
||||
@@ -731,6 +747,12 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
|
||||
return size <= size_default && (size & (size - 1)) == 0;
|
||||
}
|
||||
|
||||
#define bpf_ctx_wide_access_ok(off, size, type, field) \
|
||||
(size == sizeof(__u64) && \
|
||||
off >= offsetof(type, field) && \
|
||||
off + sizeof(__u64) <= offsetofend(type, field) && \
|
||||
off % sizeof(__u64) == 0)
|
||||
|
||||
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
|
||||
|
||||
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
||||
@@ -811,6 +833,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
|
||||
void bpf_jit_compile(struct bpf_prog *prog);
|
||||
bool bpf_jit_needs_zext(void);
|
||||
bool bpf_helper_changes_pkt_data(void *func);
|
||||
|
||||
static inline bool bpf_dump_raw_ok(void)
|
||||
@@ -1183,4 +1206,14 @@ struct bpf_sysctl_kern {
|
||||
u64 tmp_reg;
|
||||
};
|
||||
|
||||
struct bpf_sockopt_kern {
|
||||
struct sock *sk;
|
||||
u8 *optval;
|
||||
u8 *optval_end;
|
||||
s32 level;
|
||||
s32 optname;
|
||||
s32 optlen;
|
||||
s32 retval;
|
||||
};
|
||||
|
||||
#endif /* __LINUX_FILTER_H__ */
|
||||
|
@@ -46,7 +46,6 @@
|
||||
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
|
||||
#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
|
||||
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
|
||||
#define ZYNQMP_PM_CAPABILITY_POWER 0x8U
|
||||
|
||||
/*
|
||||
* Firmware FPGA Manager flags
|
||||
|
@@ -10,8 +10,41 @@
|
||||
#ifndef _LINUX_FLAT_H
|
||||
#define _LINUX_FLAT_H
|
||||
|
||||
#include <uapi/linux/flat.h>
|
||||
#include <asm/flat.h>
|
||||
#define FLAT_VERSION 0x00000004L
|
||||
|
||||
/*
|
||||
* To make everything easier to port and manage cross platform
|
||||
* development, all fields are in network byte order.
|
||||
*/
|
||||
|
||||
struct flat_hdr {
|
||||
char magic[4];
|
||||
__be32 rev; /* version (as above) */
|
||||
__be32 entry; /* Offset of first executable instruction
|
||||
with text segment from beginning of file */
|
||||
__be32 data_start; /* Offset of data segment from beginning of
|
||||
file */
|
||||
__be32 data_end; /* Offset of end of data segment from beginning
|
||||
of file */
|
||||
__be32 bss_end; /* Offset of end of bss segment from beginning
|
||||
of file */
|
||||
|
||||
/* (It is assumed that data_end through bss_end forms the bss segment.) */
|
||||
|
||||
__be32 stack_size; /* Size of stack, in bytes */
|
||||
__be32 reloc_start; /* Offset of relocation records from beginning of
|
||||
file */
|
||||
__be32 reloc_count; /* Number of relocation records */
|
||||
__be32 flags;
|
||||
__be32 build_date; /* When the program/library was built */
|
||||
__u32 filler[5]; /* Reservered, set to zero */
|
||||
};
|
||||
|
||||
#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */
|
||||
#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */
|
||||
#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */
|
||||
#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */
|
||||
#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */
|
||||
|
||||
/*
|
||||
* While it would be nice to keep this header clean, users of older
|
||||
@@ -22,28 +55,21 @@
|
||||
* with the format above, except to fix bugs with old format support.
|
||||
*/
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#define OLD_FLAT_VERSION 0x00000002L
|
||||
#define OLD_FLAT_RELOC_TYPE_TEXT 0
|
||||
#define OLD_FLAT_RELOC_TYPE_DATA 1
|
||||
#define OLD_FLAT_RELOC_TYPE_BSS 2
|
||||
|
||||
typedef union {
|
||||
unsigned long value;
|
||||
u32 value;
|
||||
struct {
|
||||
# if defined(mc68000) && !defined(CONFIG_COLDFIRE)
|
||||
signed long offset : 30;
|
||||
unsigned long type : 2;
|
||||
# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
|
||||
#if defined(__LITTLE_ENDIAN_BITFIELD) || \
|
||||
(defined(mc68000) && !defined(CONFIG_COLDFIRE))
|
||||
s32 offset : 30;
|
||||
u32 type : 2;
|
||||
# elif defined(__BIG_ENDIAN_BITFIELD)
|
||||
unsigned long type : 2;
|
||||
signed long offset : 30;
|
||||
# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
|
||||
# elif defined(__LITTLE_ENDIAN_BITFIELD)
|
||||
signed long offset : 30;
|
||||
unsigned long type : 2;
|
||||
# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
|
||||
u32 type : 2;
|
||||
s32 offset : 30;
|
||||
# else
|
||||
# error "Unknown bitfield order for flat files."
|
||||
# endif
|
||||
|
@@ -1,39 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* This file is separate from sdb.h, because I want that one to remain
|
||||
* unchanged (as far as possible) from the official sdb distribution
|
||||
*
|
||||
* This file and associated functionality are a playground for me to
|
||||
* understand stuff which will later be implemented in more generic places.
|
||||
*/
|
||||
#include <linux/sdb.h>
|
||||
|
||||
/* This is the union of all currently defined types */
|
||||
union sdb_record {
|
||||
struct sdb_interconnect ic;
|
||||
struct sdb_device dev;
|
||||
struct sdb_bridge bridge;
|
||||
struct sdb_integration integr;
|
||||
struct sdb_empty empty;
|
||||
struct sdb_synthesis synthesis;
|
||||
struct sdb_repo_url repo_url;
|
||||
};
|
||||
|
||||
struct fmc_device;
|
||||
|
||||
/* Every sdb table is turned into this structure */
|
||||
struct sdb_array {
|
||||
int len;
|
||||
int level;
|
||||
unsigned long baseaddr;
|
||||
struct fmc_device *fmc; /* the device that hosts it */
|
||||
struct sdb_array *parent; /* NULL at root */
|
||||
union sdb_record *record; /* copies of the struct */
|
||||
struct sdb_array **subtree; /* only valid for bridge items */
|
||||
};
|
||||
|
||||
extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address);
|
||||
extern void fmc_show_sdb_tree(const struct fmc_device *fmc);
|
||||
extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor,
|
||||
uint32_t device, unsigned long *sz);
|
||||
extern int fmc_free_sdb_tree(struct fmc_device *fmc);
|
@@ -1,269 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (C) 2012 CERN (www.cern.ch)
|
||||
* Author: Alessandro Rubini <rubini@gnudd.com>
|
||||
*
|
||||
* This work is part of the White Rabbit project, a research effort led
|
||||
* by CERN, the European Institute for Nuclear Research.
|
||||
*/
|
||||
#ifndef __LINUX_FMC_H__
|
||||
#define __LINUX_FMC_H__
|
||||
#include <linux/types.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
struct fmc_device;
|
||||
struct fmc_driver;
|
||||
|
||||
/*
|
||||
* This bus abstraction is developed separately from drivers, so we need
|
||||
* to check the version of the data structures we receive.
|
||||
*/
|
||||
|
||||
#define FMC_MAJOR 3
|
||||
#define FMC_MINOR 0
|
||||
#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR)
|
||||
#define __FMC_MAJOR(x) ((x) >> 16)
|
||||
#define __FMC_MINOR(x) ((x) & 0xffff)
|
||||
|
||||
/*
|
||||
* The device identification, as defined by the IPMI FRU (Field Replaceable
|
||||
* Unit) includes four different strings to describe the device. Here we
|
||||
* only match the "Board Manufacturer" and the "Board Product Name",
|
||||
* ignoring the "Board Serial Number" and "Board Part Number". All 4 are
|
||||
* expected to be strings, so they are treated as zero-terminated C strings.
|
||||
* Unspecified string (NULL) means "any", so if both are unspecified this
|
||||
* is a catch-all driver. So null entries are allowed and we use array
|
||||
* and length. This is unlike pci and usb that use null-terminated arrays
|
||||
*/
|
||||
struct fmc_fru_id {
|
||||
char *manufacturer;
|
||||
char *product_name;
|
||||
};
|
||||
|
||||
/*
|
||||
* If the FPGA is already programmed (think Etherbone or the second
|
||||
* SVEC slot), we can match on SDB devices in the memory image. This
|
||||
* match uses an array of devices that must all be present, and the
|
||||
* match is based on vendor and device only. Further checks are expected
|
||||
* to happen in the probe function. Zero means "any" and catch-all is allowed.
|
||||
*/
|
||||
struct fmc_sdb_one_id {
|
||||
uint64_t vendor;
|
||||
uint32_t device;
|
||||
};
|
||||
struct fmc_sdb_id {
|
||||
struct fmc_sdb_one_id *cores;
|
||||
int cores_nr;
|
||||
};
|
||||
|
||||
struct fmc_device_id {
|
||||
struct fmc_fru_id *fru_id;
|
||||
int fru_id_nr;
|
||||
struct fmc_sdb_id *sdb_id;
|
||||
int sdb_id_nr;
|
||||
};
|
||||
|
||||
/* This sizes the module_param_array used by generic module parameters */
|
||||
#define FMC_MAX_CARDS 32
|
||||
|
||||
/* The driver is a pretty simple thing */
|
||||
struct fmc_driver {
|
||||
unsigned long version;
|
||||
struct device_driver driver;
|
||||
int (*probe)(struct fmc_device *);
|
||||
int (*remove)(struct fmc_device *);
|
||||
const struct fmc_device_id id_table;
|
||||
/* What follows is for generic module parameters */
|
||||
int busid_n;
|
||||
int busid_val[FMC_MAX_CARDS];
|
||||
int gw_n;
|
||||
char *gw_val[FMC_MAX_CARDS];
|
||||
};
|
||||
#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver)
|
||||
|
||||
/* These are the generic parameters, that drivers may instantiate */
|
||||
#define FMC_PARAM_BUSID(_d) \
|
||||
module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444)
|
||||
#define FMC_PARAM_GATEWARE(_d) \
|
||||
module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444)
|
||||
|
||||
/*
 * Drivers may need to configure gpio pins in the carrier. To read input
 * (a very uncommon operation, and definitely not in the hot paths), just
 * configure one gpio only and get 0 or 1 as retval of the config method
 */
struct fmc_gpio {
	char *carrier_name;	/* name or NULL for virtual pins */
	int gpio;		/* pin number: raw or role-based, see the FMC_GPIO_* macros */
	int _gpio;		/* internal use by the carrier */
	int mode;		/* GPIOF_DIR_OUT etc, from <linux/gpio.h> */
	int irqmode;		/* IRQF_TRIGGER_LOW and so on */
};
|
||||
|
||||
/*
 * GPIO pin numbering: the low 0x1000 values address raw carrier pins,
 * everything above them is a virtual role (IRQ, LED, key, test point,
 * user-defined), 256 pins per role.
 */
#define FMC_GPIO_RAW(x)		(x)		/* 4096 of them */
#define __FMC_GPIO_IS_RAW(x)	((x) <= 0xfff)	/* raw pins live below 0x1000 */
#define FMC_GPIO_IRQ(x)		((x) + 0x1000)	/* 256 of them */
#define FMC_GPIO_LED(x)		((x) + 0x1100)	/* 256 of them */
#define FMC_GPIO_KEY(x)		((x) + 0x1200)	/* 256 of them */
#define FMC_GPIO_TP(x)		((x) + 0x1300)	/* 256 of them */
#define FMC_GPIO_USER(x)	((x) + 0x1400)	/* 256 of them */
/* We may add SCL and SDA, or other roles if the need arises */
|
||||
|
||||
/* GPIOF_DIR_IN etc are missing before 3.0. copy from <linux/gpio.h> */
#ifndef GPIOF_DIR_IN
# define GPIOF_DIR_OUT   (0 << 0)	/* pin is an output */
# define GPIOF_DIR_IN    (1 << 0)	/* pin is an input */
# define GPIOF_INIT_LOW  (0 << 1)	/* initial output level: low */
# define GPIOF_INIT_HIGH (1 << 1)	/* initial output level: high */
#endif
|
||||
|
||||
/*
 * The operations are offered by each carrier and should make driver
 * design completely independent of the carrier. Named GPIO pins may be
 * the exception.
 */
struct fmc_operations {
	/* May be NULL: fmc_readl()/fmc_writel() then access fpga_base directly */
	uint32_t (*read32)(struct fmc_device *fmc, int offset);
	void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
	int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
	/* Load a raw bitstream image of @len bytes */
	int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d,
			     void *gw, unsigned long len);
	/* Load a gateware image identified by name @gw */
	int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
	int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
			   char *name, int flags);
	void (*irq_ack)(struct fmc_device *fmc);
	int (*irq_free)(struct fmc_device *fmc);
	/* With ngpio == 1, also reads the pin back (see struct fmc_gpio) */
	int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio,
			   int ngpio);
	/* EEPROM access: @pos is the offset, @l the byte count */
	int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l);
	int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l);
};
|
||||
|
||||
/* Prefer this helper rather than calling of fmc->reprogram directly */

/* Reprogram from a raw in-memory bitstream of @len bytes. */
int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
		      void *gw, unsigned long len, int sdb_entry);
/* Reprogram from a gateware image identified by name @gw. */
extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
			 int sdb_entry);
|
||||
|
||||
/*
 * The device reports all information needed to access hw.
 *
 * If we have eeprom_len and not contents, the core reads it.
 * Then, parsing of identifiers is done by the core which fills fmc_fru_id..
 * Similarly a device that must be matched based on SDB cores must
 * fill the entry point and the core will scan the bus (FIXME: sdb match)
 */
struct fmc_device {
	unsigned long version;		/* NOTE(review): presumably the bus ABI version — confirm */
	unsigned long flags;		/* FMC_DEVICE_* bits, defined below */
	struct module *owner;		/* char device must pin it */
	struct fmc_fru_id id;		/* for EEPROM-based match */
	struct fmc_operations *op;	/* carrier-provided */
	int irq;			/* according to host bus. 0 == none */
	int eeprom_len;			/* Usually 8kB, may be less */
	int eeprom_addr;		/* 0x50, 0x52 etc */
	uint8_t *eeprom;		/* Full contents or leading part */
	char *carrier_name;		/* "SPEC" or similar, for special use */
	void *carrier_data;		/* "struct spec *" or equivalent */
	__iomem void *fpga_base;	/* May be NULL (Etherbone) */
	__iomem void *slot_base;	/* Set by the driver */
	struct fmc_device **devarray;	/* Allocated by the bus */
	int slot_id;			/* Index in the slot array */
	int nr_slots;			/* Number of slots in this carrier */
	unsigned long memlen;		/* Used for the char device */
	struct device dev;		/* For Linux use */
	struct device *hwdev;		/* The underlying hardware device */
	unsigned long sdbfs_entry;	/* SDB entry point (see comment above) */
	struct sdb_array *sdb;		/* scanned SDB contents */
	uint32_t device_id;		/* Filled by the device */
	char *mezzanine_name;		/* Defaults to ``fmc'' */
	void *mezzanine_data;		/* NOTE(review): presumably mezzanine-driver private — confirm */

	/* NOTE(review): dentry + dbg_ naming suggests debugfs entries — confirm */
	struct dentry *dbg_dir;
	struct dentry *dbg_sdb_dump;
};
/* Map an embedded struct device back to its fmc_device. */
#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
|
||||
|
||||
/* Bit values for fmc_device->flags */
#define FMC_DEVICE_HAS_GOLDEN 1		/* a "golden" gateware image exists */
#define FMC_DEVICE_HAS_CUSTOM 2		/* a custom gateware image exists */
#define FMC_DEVICE_NO_MEZZANINE 4	/* no mezzanine present in the slot */
#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */
|
||||
|
||||
/*
|
||||
* If fpga_base can be used, the carrier offers no readl/writel methods, and
|
||||
* this expands to a single, fast, I/O access.
|
||||
*/
|
||||
static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset)
|
||||
{
|
||||
if (unlikely(fmc->op->read32))
|
||||
return fmc->op->read32(fmc, offset);
|
||||
return readl(fmc->fpga_base + offset);
|
||||
}
|
||||
/* Register write: carrier method when present, otherwise direct MMIO. */
static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off)
{
	void (*wr)(struct fmc_device *, uint32_t, int) = fmc->op->write32;

	if (wr)
		wr(fmc, val, off);
	else
		writel(val, fmc->fpga_base + off);
}
|
||||
|
||||
/* pci-like naming */

/* Return the driver-private pointer attached to @fmc (cf. pci_get_drvdata). */
static inline void *fmc_get_drvdata(const struct fmc_device *fmc)
{
	return dev_get_drvdata(&fmc->dev);
}
|
||||
|
||||
/* Attach a driver-private pointer to @fmc (cf. pci_set_drvdata). */
static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
{
	dev_set_drvdata(&fmc->dev, data);
}
|
||||
|
||||
/* A gateware image, consumed by the _gw registration variants below. */
struct fmc_gateware {
	void *bitstream;	/* raw bitstream data */
	unsigned long len;	/* length of @bitstream in bytes */
};
|
||||
|
||||
/* The 5 access points */
extern int fmc_driver_register(struct fmc_driver *drv);
extern void fmc_driver_unregister(struct fmc_driver *drv);
extern int fmc_device_register(struct fmc_device *tdev);
/* Like fmc_device_register(), with an initial gateware image */
extern int fmc_device_register_gw(struct fmc_device *tdev,
				  struct fmc_gateware *gw);
extern void fmc_device_unregister(struct fmc_device *tdev);
|
||||
|
||||
/* Three more for device sets, all driven by the same FPGA */
extern int fmc_device_register_n(struct fmc_device **devs, int n);
/* Like fmc_device_register_n(), with an initial gateware image */
extern int fmc_device_register_n_gw(struct fmc_device **devs, int n,
				    struct fmc_gateware *gw);
extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
|
||||
|
||||
/* Internal cross-calls between files; not exported to other modules */
extern int fmc_match(struct device *dev, struct device_driver *drv);
extern int fmc_fill_id_info(struct fmc_device *fmc);
extern void fmc_free_id_info(struct fmc_device *fmc);
extern void fmc_dump_eeprom(const struct fmc_device *fmc);
|
||||
|
||||
/* helpers for FMC operations (wrappers around the fmc_operations methods) */
extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
			   char *name, int flags);
extern void fmc_irq_free(struct fmc_device *fmc);
extern void fmc_irq_ack(struct fmc_device *fmc);
extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv);
extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio,
			   int ngpio);
extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l);
extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l);
|
||||
|
||||
/*
 * A second, verbatim copy of the fmc_irq_request/fmc_irq_free/fmc_irq_ack/
 * fmc_validate prototypes used to live here; it duplicated the "helpers for
 * FMC operations" section above and has been removed.
 */
|
||||
|
||||
#endif /* __LINUX_FMC_H__ */
|
19
include/linux/fpga/adi-axi-common.h
Normal file
19
include/linux/fpga/adi-axi-common.h
Normal file
@@ -0,0 +1,19 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Analog Devices AXI common registers & definitions
 *
 * Copyright 2019 Analog Devices Inc.
 *
 * https://wiki.analog.com/resources/fpga/docs/axi_ip
 * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
 */

#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_

/* Offset of the common version register exposed by ADI AXI cores. */
#define ADI_AXI_REG_VERSION	0x0000

/* Pack a pcore version triple as 0x00MMmmpp (major, minor, patch). */
#define ADI_AXI_PCORE_VER(major, minor, patch)	\
	((patch) | ((minor) << 8) | ((major) << 16))

#endif /* ADI_AXI_COMMON_H_ */
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user